Illumos Crypto Port module added to enable native encryption in zfs

A port of the Illumos Crypto Framework to a Linux kernel module (found
in module/icp). This is needed to do the actual encryption work. We cannot
use the Linux kernel's built-in crypto API because it is only exported to
GPL-licensed modules. Having the ICP also means the crypto code can run on
any of the other kernels under OpenZFS. I ended up porting over most of the
internals of the framework, which means that porting over other API calls (if
we need them) should be fairly easy. Specifically, I have ported over the API
functions related to encryption, digests, macs, and crypto templates. The ICP
is able to use assembly-accelerated encryption on amd64 machines and AES-NI
instructions on Intel chips that support it. There are placeholder
directories for similar assembly optimizations for other architectures
(although they have not been written).

Signed-off-by: Tom Caputi <tcaputi@datto.com>
Signed-off-by: Tony Hutter <hutter2@llnl.gov>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Issue #4329
This commit is contained in:
Tom Caputi 2016-05-12 10:51:24 -04:00 committed by Brian Behlendorf
parent be88e733a6
commit 0b04990a5d
90 changed files with 35834 additions and 80 deletions

1
.gitignore vendored
View File

@ -21,6 +21,7 @@
*.swp
.deps
.libs
.dirstamp
.DS_Store
modules.order
Makefile

22
config/always-arch.m4 Normal file
View File

@ -0,0 +1,22 @@
dnl #
dnl # Set the target arch for libspl atomic implementation and the icp
dnl #
AC_DEFUN([ZFS_AC_CONFIG_ALWAYS_ARCH], [
AC_MSG_CHECKING(for target asm dir)
dnl # Normalize any i386..i686 target_cpu value to "i386".
TARGET_ARCH=`echo ${target_cpu} | sed -e s/i.86/i386/`
case $TARGET_ARCH in
i386|x86_64)
TARGET_ASM_DIR=asm-${TARGET_ARCH}
;;
*)
dnl # No assembly implementation for this arch; use the generic C code.
TARGET_ASM_DIR=asm-generic
;;
esac
AC_SUBST([TARGET_ASM_DIR])
dnl # Automake conditionals so Makefiles can select matching asm sources.
AM_CONDITIONAL([TARGET_ASM_X86_64], test $TARGET_ASM_DIR = asm-x86_64)
AM_CONDITIONAL([TARGET_ASM_I386], test $TARGET_ASM_DIR = asm-i386)
AM_CONDITIONAL([TARGET_ASM_GENERIC], test $TARGET_ASM_DIR = asm-generic)
AC_MSG_RESULT([$TARGET_ASM_DIR])
])

View File

@ -1,19 +0,0 @@
dnl #
dnl # Set the target arch for libspl atomic implementation
dnl #
AC_DEFUN([ZFS_AC_CONFIG_USER_ARCH], [
AC_MSG_CHECKING(for target asm dir)
TARGET_ARCH=`echo ${target_cpu} | sed -e s/i.86/i386/`
case $TARGET_ARCH in
i386|x86_64)
TARGET_ASM_DIR=asm-${TARGET_ARCH}
;;
*)
TARGET_ASM_DIR=asm-generic
;;
esac
AC_SUBST([TARGET_ASM_DIR])
AC_MSG_RESULT([$TARGET_ASM_DIR])
])

View File

@ -7,7 +7,6 @@ AC_DEFUN([ZFS_AC_CONFIG_USER], [
ZFS_AC_CONFIG_USER_SYSTEMD
ZFS_AC_CONFIG_USER_SYSVINIT
ZFS_AC_CONFIG_USER_DRACUT
ZFS_AC_CONFIG_USER_ARCH
ZFS_AC_CONFIG_USER_ZLIB
ZFS_AC_CONFIG_USER_LIBUUID
ZFS_AC_CONFIG_USER_LIBTIRPC

View File

@ -64,12 +64,10 @@ AC_DEFUN([ZFS_AC_CONFIG_ALWAYS], [
ZFS_AC_CONFIG_ALWAYS_NO_UNUSED_BUT_SET_VARIABLE
ZFS_AC_CONFIG_ALWAYS_NO_BOOL_COMPARE
ZFS_AC_CONFIG_ALWAYS_TOOLCHAIN_SIMD
ZFS_AC_CONFIG_ALWAYS_ARCH
])
AC_DEFUN([ZFS_AC_CONFIG], [
TARGET_ASM_DIR=asm-generic
AC_SUBST(TARGET_ASM_DIR)
ZFS_CONFIG=all
AC_ARG_WITH([config],
AS_HELP_STRING([--with-config=CONFIG],

View File

@ -39,7 +39,7 @@ AC_CONFIG_MACRO_DIR([config])
AC_CANONICAL_SYSTEM
AM_MAINTAINER_MODE
m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])])
AM_INIT_AUTOMAKE
AM_INIT_AUTOMAKE([subdir-objects])
AC_CONFIG_HEADERS([zfs_config.h], [
(mv zfs_config.h zfs_config.h.tmp &&
awk -f ${ac_srcdir}/config/config.awk zfs_config.h.tmp >zfs_config.h &&
@ -85,6 +85,7 @@ AC_CONFIG_FILES([
lib/libspl/include/util/Makefile
lib/libavl/Makefile
lib/libefi/Makefile
lib/libicp/Makefile
lib/libnvpair/Makefile
lib/libunicode/Makefile
lib/libuutil/Makefile
@ -125,12 +126,14 @@ AC_CONFIG_FILES([
module/zcommon/Makefile
module/zfs/Makefile
module/zpios/Makefile
module/icp/Makefile
include/Makefile
include/linux/Makefile
include/sys/Makefile
include/sys/fs/Makefile
include/sys/fm/Makefile
include/sys/fm/fs/Makefile
include/sys/crypto/Makefile
scripts/Makefile
scripts/zpios-profile/Makefile
scripts/zpios-test/Makefile

View File

@ -34,27 +34,8 @@ cp --recursive include "$KERNEL_DIR/include/zfs"
cp --recursive module "$KERNEL_DIR/fs/zfs"
cp zfs_config.h "$KERNEL_DIR/"
adjust_obj_paths()
{
local FILE="$1"
local LINE OBJPATH
while IFS='' read -r LINE
do
OBJPATH="${LINE#\$(MODULE)-objs += }"
if [ "$OBJPATH" = "$LINE" ]
then
echo "$LINE"
else
echo "\$(MODULE)-objs += ${OBJPATH##*/}"
fi
done < "$FILE" > "$FILE.new"
mv "$FILE.new" "$FILE"
}
for MODULE in "${MODULES[@]}"
do
adjust_obj_paths "$KERNEL_DIR/fs/zfs/$MODULE/Makefile"
sed -i.bak '/obj =/d' "$KERNEL_DIR/fs/zfs/$MODULE/Makefile"
sed -i.bak '/src =/d' "$KERNEL_DIR/fs/zfs/$MODULE/Makefile"
done

View File

@ -1,4 +1,4 @@
SUBDIRS = fm fs
SUBDIRS = fm fs crypto
COMMON_H = \
$(top_srcdir)/include/sys/arc.h \

View File

@ -0,0 +1,20 @@
COMMON_H = \
$(top_srcdir)/include/sys/crypto/api.h \
$(top_srcdir)/include/sys/crypto/common.h \
$(top_srcdir)/include/sys/crypto/icp.h
KERNEL_H =
USER_H =
EXTRA_DIST = $(COMMON_H) $(KERNEL_H) $(USER_H)
if CONFIG_USER
libzfsdir = $(includedir)/libzfs/sys/crypto
libzfs_HEADERS = $(COMMON_H) $(USER_H)
endif
if CONFIG_KERNEL
kerneldir = @prefix@/src/zfs-$(VERSION)/include/sys/crypto
kernel_HEADERS = $(COMMON_H) $(KERNEL_H)
endif

425
include/sys/crypto/api.h Normal file
View File

@ -0,0 +1,425 @@
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
#ifndef _SYS_CRYPTO_API_H
#define _SYS_CRYPTO_API_H
#ifdef __cplusplus
extern "C" {
#endif
#include <sys/zfs_context.h>
#include <sys/crypto/common.h>
/*
 * Opaque handles used by consumers of the framework.  crypto_context_t
 * carries state across the *_init/*_update/*_final multi-part calls;
 * crypto_req_id_t identifies an asynchronous request (see crypto_cancel_req).
 */
typedef long crypto_req_id_t;
typedef void *crypto_bc_t;
typedef void *crypto_context_t;
typedef void *crypto_ctx_template_t;
typedef uint32_t crypto_call_flag_t;
/* crypto_call_flag's values */
#define CRYPTO_ALWAYS_QUEUE 0x00000001 /* ALWAYS queue the req. */
#define CRYPTO_NOTIFY_OPDONE 0x00000002 /* Notify intermediate steps */
#define CRYPTO_SKIP_REQID 0x00000004 /* Skip request ID generation */
#define CRYPTO_RESTRICTED 0x00000008 /* cannot use restricted prov */
/*
 * Descriptor for an asynchronous service request.  NOTE(review): in the
 * Illumos kCF, passing a NULL crypto_call_req_t * selects synchronous
 * operation -- confirm the ICP port preserves this convention.
 */
typedef struct {
crypto_call_flag_t cr_flag;
void (*cr_callback_func)(void *, int);
void *cr_callback_arg;
crypto_req_id_t cr_reqid;
} crypto_call_req_t;
/*
 * Returns the mechanism type corresponding to a mechanism name.
 */
#define CRYPTO_MECH_INVALID ((uint64_t)-1)
extern crypto_mech_type_t crypto_mech2id(crypto_mech_name_t name);
/*
 * Create and destroy context templates.
 * kmflag controls kernel memory allocation behavior -- TODO(review):
 * confirm accepted values in the ICP port.
 */
extern int crypto_create_ctx_template(crypto_mechanism_t *mech,
crypto_key_t *key, crypto_ctx_template_t *tmpl, int kmflag);
extern void crypto_destroy_ctx_template(crypto_ctx_template_t tmpl);
/*
 * Single and multi-part digest operations.  Throughout this header the
 * *_prov variants address an explicit provider handle and session id
 * instead of letting the framework pick a provider.
 */
extern int crypto_digest(crypto_mechanism_t *mech, crypto_data_t *data,
crypto_data_t *digest, crypto_call_req_t *cr);
extern int crypto_digest_prov(crypto_provider_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_data_t *, crypto_data_t *,
crypto_call_req_t *);
extern int crypto_digest_init(crypto_mechanism_t *mech, crypto_context_t *ctxp,
crypto_call_req_t *cr);
extern int crypto_digest_init_prov(crypto_provider_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_context_t *, crypto_call_req_t *);
extern int crypto_digest_update(crypto_context_t ctx, crypto_data_t *data,
crypto_call_req_t *cr);
extern int crypto_digest_final(crypto_context_t ctx, crypto_data_t *digest,
crypto_call_req_t *cr);
/*
 * Single and multi-part MAC operations.
 */
extern int crypto_mac(crypto_mechanism_t *mech, crypto_data_t *data,
crypto_key_t *key, crypto_ctx_template_t tmpl, crypto_data_t *mac,
crypto_call_req_t *cr);
extern int crypto_mac_prov(crypto_provider_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_data_t *, crypto_key_t *,
crypto_ctx_template_t, crypto_data_t *, crypto_call_req_t *);
extern int crypto_mac_verify(crypto_mechanism_t *mech, crypto_data_t *data,
crypto_key_t *key, crypto_ctx_template_t tmpl, crypto_data_t *mac,
crypto_call_req_t *cr);
extern int crypto_mac_verify_prov(crypto_provider_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_data_t *, crypto_key_t *,
crypto_ctx_template_t, crypto_data_t *, crypto_call_req_t *);
extern int crypto_mac_init(crypto_mechanism_t *mech, crypto_key_t *key,
crypto_ctx_template_t tmpl, crypto_context_t *ctxp, crypto_call_req_t *cr);
extern int crypto_mac_init_prov(crypto_provider_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_key_t *, crypto_ctx_template_t,
crypto_context_t *, crypto_call_req_t *);
extern int crypto_mac_update(crypto_context_t ctx, crypto_data_t *data,
crypto_call_req_t *cr);
extern int crypto_mac_final(crypto_context_t ctx, crypto_data_t *data,
crypto_call_req_t *cr);
/*
 * Single and multi-part sign with private key operations.
 */
extern int crypto_sign(crypto_mechanism_t *mech, crypto_key_t *key,
crypto_data_t *data, crypto_ctx_template_t tmpl,
crypto_data_t *signature, crypto_call_req_t *cr);
extern int crypto_sign_prov(crypto_provider_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
crypto_ctx_template_t, crypto_data_t *, crypto_call_req_t *);
extern int crypto_sign_init(crypto_mechanism_t *mech, crypto_key_t *key,
crypto_ctx_template_t tmpl, crypto_context_t *ctxp, crypto_call_req_t *cr);
extern int crypto_sign_init_prov(crypto_provider_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_key_t *, crypto_ctx_template_t,
crypto_context_t *, crypto_call_req_t *);
extern int crypto_sign_update(crypto_context_t ctx, crypto_data_t *data,
crypto_call_req_t *cr);
extern int crypto_sign_final(crypto_context_t ctx, crypto_data_t *signature,
crypto_call_req_t *cr);
extern int crypto_sign_recover_init_prov(crypto_provider_t,
crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *,
crypto_ctx_template_t tmpl, crypto_context_t *, crypto_call_req_t *);
extern int crypto_sign_recover(crypto_mechanism_t *mech, crypto_key_t *key,
crypto_data_t *data, crypto_ctx_template_t tmpl, crypto_data_t *signature,
crypto_call_req_t *cr);
extern int crypto_sign_recover_prov(crypto_provider_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
crypto_ctx_template_t, crypto_data_t *, crypto_call_req_t *);
/*
 * Single and multi-part verify with public key operations.
 */
extern int crypto_verify(crypto_mechanism_t *mech, crypto_key_t *key,
crypto_data_t *data, crypto_ctx_template_t tmpl, crypto_data_t *signature,
crypto_call_req_t *cr);
extern int crypto_verify_prov(crypto_provider_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
crypto_ctx_template_t, crypto_data_t *, crypto_call_req_t *);
extern int crypto_verify_init(crypto_mechanism_t *mech, crypto_key_t *key,
crypto_ctx_template_t tmpl, crypto_context_t *ctxp, crypto_call_req_t *cr);
extern int crypto_verify_init_prov(crypto_provider_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_key_t *, crypto_ctx_template_t,
crypto_context_t *, crypto_call_req_t *);
extern int crypto_verify_update(crypto_context_t ctx, crypto_data_t *data,
crypto_call_req_t *cr);
extern int crypto_verify_final(crypto_context_t ctx, crypto_data_t *signature,
crypto_call_req_t *cr);
extern int crypto_verify_recover_init_prov(crypto_provider_t,
crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *,
crypto_ctx_template_t tmpl, crypto_context_t *, crypto_call_req_t *);
extern int crypto_verify_recover(crypto_mechanism_t *mech, crypto_key_t *key,
crypto_data_t *signature, crypto_ctx_template_t tmpl, crypto_data_t *data,
crypto_call_req_t *cr);
extern int crypto_verify_recover_prov(crypto_provider_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
crypto_ctx_template_t, crypto_data_t *, crypto_call_req_t *);
/*
 * Single and multi-part encryption operations.
 */
extern int crypto_encrypt(crypto_mechanism_t *mech, crypto_data_t *plaintext,
crypto_key_t *key, crypto_ctx_template_t tmpl, crypto_data_t *ciphertext,
crypto_call_req_t *cr);
extern int crypto_encrypt_prov(crypto_provider_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_data_t *, crypto_key_t *,
crypto_ctx_template_t, crypto_data_t *, crypto_call_req_t *);
extern int crypto_encrypt_init(crypto_mechanism_t *mech, crypto_key_t *key,
crypto_ctx_template_t tmpl, crypto_context_t *ctxp, crypto_call_req_t *cr);
extern int crypto_encrypt_init_prov(crypto_provider_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_key_t *, crypto_ctx_template_t,
crypto_context_t *, crypto_call_req_t *);
extern int crypto_encrypt_update(crypto_context_t ctx,
crypto_data_t *plaintext, crypto_data_t *ciphertext,
crypto_call_req_t *cr);
extern int crypto_encrypt_final(crypto_context_t ctx,
crypto_data_t *ciphertext, crypto_call_req_t *cr);
/*
 * Single and multi-part decryption operations.
 */
extern int crypto_decrypt(crypto_mechanism_t *mech, crypto_data_t *ciphertext,
crypto_key_t *key, crypto_ctx_template_t tmpl, crypto_data_t *plaintext,
crypto_call_req_t *cr);
extern int crypto_decrypt_prov(crypto_provider_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_data_t *, crypto_key_t *,
crypto_ctx_template_t, crypto_data_t *, crypto_call_req_t *);
extern int crypto_decrypt_init(crypto_mechanism_t *mech, crypto_key_t *key,
crypto_ctx_template_t tmpl, crypto_context_t *ctxp,
crypto_call_req_t *cr);
extern int crypto_decrypt_init_prov(crypto_provider_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_key_t *, crypto_ctx_template_t,
crypto_context_t *, crypto_call_req_t *);
extern int crypto_decrypt_update(crypto_context_t ctx,
crypto_data_t *ciphertext, crypto_data_t *plaintext,
crypto_call_req_t *cr);
extern int crypto_decrypt_final(crypto_context_t ctx, crypto_data_t *plaintext,
crypto_call_req_t *cr);
/*
 * Single and multi-part encrypt/MAC dual operations.
 */
extern int crypto_encrypt_mac(crypto_mechanism_t *encr_mech,
crypto_mechanism_t *mac_mech, crypto_data_t *pt,
crypto_key_t *encr_key, crypto_key_t *mac_key,
crypto_ctx_template_t encr_tmpl, crypto_ctx_template_t mac_tmpl,
crypto_dual_data_t *ct, crypto_data_t *mac, crypto_call_req_t *cr);
extern int crypto_encrypt_mac_prov(crypto_provider_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_mechanism_t *, crypto_data_t *,
crypto_key_t *, crypto_key_t *, crypto_ctx_template_t,
crypto_ctx_template_t, crypto_dual_data_t *, crypto_data_t *,
crypto_call_req_t *);
extern int crypto_encrypt_mac_init(crypto_mechanism_t *encr_mech,
crypto_mechanism_t *mac_mech, crypto_key_t *encr_key,
crypto_key_t *mac_key, crypto_ctx_template_t encr_tmpl,
crypto_ctx_template_t mac_tmpl, crypto_context_t *ctxp,
crypto_call_req_t *cr);
extern int crypto_encrypt_mac_init_prov(crypto_provider_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_mechanism_t *, crypto_key_t *, crypto_key_t *,
crypto_ctx_template_t, crypto_ctx_template_t, crypto_context_t *,
crypto_call_req_t *);
extern int crypto_encrypt_mac_update(crypto_context_t ctx,
crypto_data_t *pt, crypto_dual_data_t *ct, crypto_call_req_t *cr);
extern int crypto_encrypt_mac_final(crypto_context_t ctx,
crypto_dual_data_t *ct, crypto_data_t *mac, crypto_call_req_t *cr);
/*
 * Single and multi-part MAC/decrypt dual operations.
 */
extern int crypto_mac_decrypt(crypto_mechanism_t *mac_mech,
crypto_mechanism_t *decr_mech, crypto_dual_data_t *ct,
crypto_key_t *mac_key, crypto_key_t *decr_key,
crypto_ctx_template_t mac_tmpl, crypto_ctx_template_t decr_tmpl,
crypto_data_t *mac, crypto_data_t *pt, crypto_call_req_t *cr);
extern int crypto_mac_decrypt_prov(crypto_provider_t, crypto_session_id_t,
crypto_mechanism_t *mac_mech, crypto_mechanism_t *decr_mech,
crypto_dual_data_t *ct, crypto_key_t *mac_key, crypto_key_t *decr_key,
crypto_ctx_template_t mac_tmpl, crypto_ctx_template_t decr_tmpl,
crypto_data_t *mac, crypto_data_t *pt, crypto_call_req_t *cr);
extern int crypto_mac_verify_decrypt(crypto_mechanism_t *mac_mech,
crypto_mechanism_t *decr_mech, crypto_dual_data_t *ct,
crypto_key_t *mac_key, crypto_key_t *decr_key,
crypto_ctx_template_t mac_tmpl, crypto_ctx_template_t decr_tmpl,
crypto_data_t *mac, crypto_data_t *pt, crypto_call_req_t *cr);
extern int crypto_mac_verify_decrypt_prov(crypto_provider_t,
crypto_session_id_t, crypto_mechanism_t *mac_mech,
crypto_mechanism_t *decr_mech, crypto_dual_data_t *ct,
crypto_key_t *mac_key, crypto_key_t *decr_key,
crypto_ctx_template_t mac_tmpl, crypto_ctx_template_t decr_tmpl,
crypto_data_t *mac, crypto_data_t *pt, crypto_call_req_t *cr);
extern int crypto_mac_decrypt_init(crypto_mechanism_t *mac_mech,
crypto_mechanism_t *decr_mech, crypto_key_t *mac_key,
crypto_key_t *decr_key, crypto_ctx_template_t mac_tmpl,
crypto_ctx_template_t decr_tmpl, crypto_context_t *ctxp,
crypto_call_req_t *cr);
extern int crypto_mac_decrypt_init_prov(crypto_provider_t,
crypto_session_id_t, crypto_mechanism_t *mac_mech,
crypto_mechanism_t *decr_mech, crypto_key_t *mac_key,
crypto_key_t *decr_key, crypto_ctx_template_t mac_tmpl,
crypto_ctx_template_t decr_tmpl, crypto_context_t *ctxp,
crypto_call_req_t *cr);
extern int crypto_mac_decrypt_update(crypto_context_t ctx,
crypto_dual_data_t *ct, crypto_data_t *pt, crypto_call_req_t *cr);
extern int crypto_mac_decrypt_final(crypto_context_t ctx, crypto_data_t *mac,
crypto_data_t *pt, crypto_call_req_t *cr);
/* Session Management */
extern int crypto_session_open(crypto_provider_t, crypto_session_id_t *,
crypto_call_req_t *);
extern int crypto_session_close(crypto_provider_t, crypto_session_id_t,
crypto_call_req_t *);
extern int crypto_session_login(crypto_provider_t, crypto_session_id_t,
crypto_user_type_t, char *, size_t, crypto_call_req_t *);
extern int crypto_session_logout(crypto_provider_t, crypto_session_id_t,
crypto_call_req_t *);
/* Object Management */
extern int crypto_object_copy(crypto_provider_t, crypto_session_id_t,
crypto_object_id_t, crypto_object_attribute_t *, uint_t,
crypto_object_id_t *, crypto_call_req_t *);
extern int crypto_object_create(crypto_provider_t, crypto_session_id_t,
crypto_object_attribute_t *, uint_t, crypto_object_id_t *,
crypto_call_req_t *);
extern int crypto_object_destroy(crypto_provider_t, crypto_session_id_t,
crypto_object_id_t, crypto_call_req_t *);
extern int crypto_object_get_attribute_value(crypto_provider_t,
crypto_session_id_t, crypto_object_id_t, crypto_object_attribute_t *,
uint_t, crypto_call_req_t *);
extern int crypto_object_get_size(crypto_provider_t, crypto_session_id_t,
crypto_object_id_t, size_t *, crypto_call_req_t *);
extern int crypto_object_find_final(crypto_provider_t, void *,
crypto_call_req_t *);
extern int crypto_object_find_init(crypto_provider_t, crypto_session_id_t,
crypto_object_attribute_t *, uint_t, void **, crypto_call_req_t *);
extern int crypto_object_find(crypto_provider_t, void *, crypto_object_id_t *,
uint_t *, uint_t, crypto_call_req_t *);
extern int crypto_object_set_attribute_value(crypto_provider_t,
crypto_session_id_t, crypto_object_id_t, crypto_object_attribute_t *,
uint_t, crypto_call_req_t *);
/* Key Management */
extern int crypto_key_derive(crypto_provider_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_key_t *, crypto_object_attribute_t *,
uint_t, crypto_object_id_t *, crypto_call_req_t *);
extern int crypto_key_generate(crypto_provider_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_object_attribute_t *, uint_t,
crypto_object_id_t *, crypto_call_req_t *);
extern int crypto_key_generate_pair(crypto_provider_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_object_attribute_t *, uint_t,
crypto_object_attribute_t *, uint_t, crypto_object_id_t *,
crypto_object_id_t *, crypto_call_req_t *);
extern int crypto_key_unwrap(crypto_provider_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_key_t *, uchar_t *, size_t *,
crypto_object_attribute_t *, uint_t, crypto_object_id_t *,
crypto_call_req_t *);
extern int crypto_key_wrap(crypto_provider_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_key_t *, crypto_object_id_t *, uchar_t *,
size_t *, crypto_call_req_t *);
extern int crypto_key_check_prov(crypto_provider_t, crypto_mechanism_t *mech,
crypto_key_t *key);
extern int crypto_key_check(crypto_mechanism_t *mech, crypto_key_t *key);
/*
 * Routines to cancel a single asynchronous request or all asynchronous
 * requests associated with a particular context.
 */
extern void crypto_cancel_req(crypto_req_id_t req);
extern void crypto_cancel_ctx(crypto_context_t ctx);
/*
 * crypto_get_mech_list(9F) allocates and returns the list of currently
 * supported cryptographic mechanisms.
 */
extern crypto_mech_name_t *crypto_get_mech_list(uint_t *count, int kmflag);
extern void crypto_free_mech_list(crypto_mech_name_t *mech_names,
uint_t count);
extern crypto_provider_t crypto_get_provider(char *, char *, char *);
extern int crypto_get_provinfo(crypto_provider_t, crypto_provider_ext_info_t *);
extern void crypto_release_provider(crypto_provider_t);
/*
 * A kernel consumer can request to be notified when some particular event
 * occurs. The valid events, callback function type, and functions to
 * be called to register or unregister for notification are defined below.
 */
#define CRYPTO_EVENT_MECHS_CHANGED 0x00000001
#define CRYPTO_EVENT_PROVIDER_REGISTERED 0x00000002
#define CRYPTO_EVENT_PROVIDER_UNREGISTERED 0x00000004
typedef enum {
CRYPTO_MECH_ADDED = 1,
CRYPTO_MECH_REMOVED
} crypto_event_change_t;
/* The event_arg argument structure for CRYPTO_EVENT_PROVIDERS_CHANGE event */
typedef struct crypto_notify_event_change {
crypto_mech_name_t ec_mech_name;
crypto_provider_type_t ec_provider_type;
crypto_event_change_t ec_change;
} crypto_notify_event_change_t;
typedef void *crypto_notify_handle_t;
typedef void (*crypto_notify_callback_t)(uint32_t event_mask, void *event_arg);
extern crypto_notify_handle_t crypto_notify_events(
crypto_notify_callback_t nf, uint32_t event_mask);
extern void crypto_unnotify_events(crypto_notify_handle_t);
/*
 * crypto_bufcall(9F) group of routines.
 */
extern crypto_bc_t crypto_bufcall_alloc(void);
extern int crypto_bufcall_free(crypto_bc_t bc);
extern int crypto_bufcall(crypto_bc_t bc, void (*func)(void *arg), void *arg);
extern int crypto_unbufcall(crypto_bc_t bc);
/*
 * To obtain the list of key size ranges supported by a mechanism.
 */
#define CRYPTO_MECH_USAGE_ENCRYPT 0x00000001
#define CRYPTO_MECH_USAGE_DECRYPT 0x00000002
#define CRYPTO_MECH_USAGE_MAC 0x00000004
typedef uint32_t crypto_mech_usage_t;
typedef struct crypto_mechanism_info {
size_t mi_min_key_size;
size_t mi_max_key_size;
crypto_keysize_unit_t mi_keysize_unit; /* for mi_xxx_key_size */
crypto_mech_usage_t mi_usage;
} crypto_mechanism_info_t;
#ifdef _SYSCALL32
typedef struct crypto_mechanism_info32 {
size32_t mi_min_key_size;
size32_t mi_max_key_size;
crypto_keysize_unit_t mi_keysize_unit; /* for mi_xxx_key_size */
crypto_mech_usage_t mi_usage;
} crypto_mechanism_info32_t;
#endif /* _SYSCALL32 */
extern int crypto_get_all_mech_info(crypto_mech_type_t,
crypto_mechanism_info_t **, uint_t *, int);
extern void crypto_free_all_mech_info(crypto_mechanism_info_t *, uint_t);
#ifdef __cplusplus
}
#endif
#endif /* _SYS_CRYPTO_API_H */

583
include/sys/crypto/common.h Normal file
View File

@ -0,0 +1,583 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
*/
/*
* Copyright 2013 Saso Kiselkov. All rights reserved.
*/
#ifndef _SYS_CRYPTO_COMMON_H
#define _SYS_CRYPTO_COMMON_H
/*
* Header file for the common data structures of the cryptographic framework
*/
#ifdef __cplusplus
extern "C" {
#endif
#include <sys/zfs_context.h>
/* Cryptographic Mechanisms */
#define CRYPTO_MAX_MECH_NAME 32
typedef char crypto_mech_name_t[CRYPTO_MAX_MECH_NAME];
typedef uint64_t crypto_mech_type_t;
typedef struct crypto_mechanism {
crypto_mech_type_t cm_type; /* mechanism type */
caddr_t cm_param; /* mech. parameter */
size_t cm_param_len; /* mech. parameter len */
} crypto_mechanism_t;
#ifdef _SYSCALL32
typedef struct crypto_mechanism32 {
crypto_mech_type_t cm_type; /* mechanism type */
caddr32_t cm_param; /* mech. parameter */
size32_t cm_param_len; /* mech. parameter len */
} crypto_mechanism32_t;
#endif /* _SYSCALL32 */
/* CK_AES_CTR_PARAMS provides parameters to the CKM_AES_CTR mechanism */
typedef struct CK_AES_CTR_PARAMS {
ulong_t ulCounterBits;
uint8_t cb[16];
} CK_AES_CTR_PARAMS;
/* CK_AES_CCM_PARAMS provides parameters to the CKM_AES_CCM mechanism */
typedef struct CK_AES_CCM_PARAMS {
ulong_t ulMACSize;
ulong_t ulNonceSize;
ulong_t ulAuthDataSize;
ulong_t ulDataSize; /* used for plaintext or ciphertext */
uchar_t *nonce;
uchar_t *authData;
} CK_AES_CCM_PARAMS;
/* CK_AES_GCM_PARAMS provides parameters to the CKM_AES_GCM mechanism */
typedef struct CK_AES_GCM_PARAMS {
uchar_t *pIv;
ulong_t ulIvLen;
ulong_t ulIvBits;
uchar_t *pAAD;
ulong_t ulAADLen;
ulong_t ulTagBits;
} CK_AES_GCM_PARAMS;
/* CK_AES_GMAC_PARAMS provides parameters to the CKM_AES_GMAC mechanism */
typedef struct CK_AES_GMAC_PARAMS {
uchar_t *pIv;
uchar_t *pAAD;
ulong_t ulAADLen;
} CK_AES_GMAC_PARAMS;
/*
* CK_ECDH1_DERIVE_PARAMS provides the parameters to the
* CKM_ECDH1_KEY_DERIVE mechanism
*/
typedef struct CK_ECDH1_DERIVE_PARAMS {
ulong_t kdf;
ulong_t ulSharedDataLen;
uchar_t *pSharedData;
ulong_t ulPublicDataLen;
uchar_t *pPublicData;
} CK_ECDH1_DERIVE_PARAMS;
#ifdef _SYSCALL32
/* needed for 32-bit applications running on 64-bit kernels */
typedef struct CK_AES_CTR_PARAMS32 {
uint32_t ulCounterBits;
uint8_t cb[16];
} CK_AES_CTR_PARAMS32;
/* needed for 32-bit applications running on 64-bit kernels */
typedef struct CK_AES_CCM_PARAMS32 {
uint32_t ulMACSize;
uint32_t ulNonceSize;
uint32_t ulAuthDataSize;
uint32_t ulDataSize;
caddr32_t nonce;
caddr32_t authData;
} CK_AES_CCM_PARAMS32;
/* needed for 32-bit applications running on 64-bit kernels */
typedef struct CK_AES_GCM_PARAMS32 {
caddr32_t pIv;
uint32_t ulIvLen;
uint32_t ulIvBits;
caddr32_t pAAD;
uint32_t ulAADLen;
uint32_t ulTagBits;
} CK_AES_GCM_PARAMS32;
/* needed for 32-bit applications running on 64-bit kernels */
typedef struct CK_AES_GMAC_PARAMS32 {
caddr32_t pIv;
caddr32_t pAAD;
uint32_t ulAADLen;
} CK_AES_GMAC_PARAMS32;
typedef struct CK_ECDH1_DERIVE_PARAMS32 {
uint32_t kdf;
uint32_t ulSharedDataLen;
caddr32_t pSharedData;
uint32_t ulPublicDataLen;
caddr32_t pPublicData;
} CK_ECDH1_DERIVE_PARAMS32;
#endif /* _SYSCALL32 */
/*
* The measurement unit bit flag for a mechanism's minimum or maximum key size.
* The unit are mechanism dependent. It can be in bits or in bytes.
*/
typedef uint32_t crypto_keysize_unit_t;
/*
* The following bit flags are valid in cm_mech_flags field in
* the crypto_mech_info_t structure of the SPI.
*
* Only the first two bit flags are valid in mi_keysize_unit
* field in the crypto_mechanism_info_t structure of the API.
*/
#define CRYPTO_KEYSIZE_UNIT_IN_BITS 0x00000001
#define CRYPTO_KEYSIZE_UNIT_IN_BYTES 0x00000002
#define CRYPTO_CAN_SHARE_OPSTATE 0x00000004 /* supports sharing */
/* Mechanisms supported out-of-the-box */
#define SUN_CKM_MD4 "CKM_MD4"
#define SUN_CKM_MD5 "CKM_MD5"
#define SUN_CKM_MD5_HMAC "CKM_MD5_HMAC"
#define SUN_CKM_MD5_HMAC_GENERAL "CKM_MD5_HMAC_GENERAL"
#define SUN_CKM_SHA1 "CKM_SHA_1"
#define SUN_CKM_SHA1_HMAC "CKM_SHA_1_HMAC"
#define SUN_CKM_SHA1_HMAC_GENERAL "CKM_SHA_1_HMAC_GENERAL"
#define SUN_CKM_SHA256 "CKM_SHA256"
#define SUN_CKM_SHA256_HMAC "CKM_SHA256_HMAC"
#define SUN_CKM_SHA256_HMAC_GENERAL "CKM_SHA256_HMAC_GENERAL"
#define SUN_CKM_SHA384 "CKM_SHA384"
#define SUN_CKM_SHA384_HMAC "CKM_SHA384_HMAC"
#define SUN_CKM_SHA384_HMAC_GENERAL "CKM_SHA384_HMAC_GENERAL"
#define SUN_CKM_SHA512 "CKM_SHA512"
#define SUN_CKM_SHA512_HMAC "CKM_SHA512_HMAC"
#define SUN_CKM_SHA512_HMAC_GENERAL "CKM_SHA512_HMAC_GENERAL"
#define SUN_CKM_SHA512_224 "CKM_SHA512_224"
#define SUN_CKM_SHA512_256 "CKM_SHA512_256"
#define SUN_CKM_DES_CBC "CKM_DES_CBC"
#define SUN_CKM_DES3_CBC "CKM_DES3_CBC"
#define SUN_CKM_DES_ECB "CKM_DES_ECB"
#define SUN_CKM_DES3_ECB "CKM_DES3_ECB"
#define SUN_CKM_BLOWFISH_CBC "CKM_BLOWFISH_CBC"
#define SUN_CKM_BLOWFISH_ECB "CKM_BLOWFISH_ECB"
#define SUN_CKM_AES_CBC "CKM_AES_CBC"
#define SUN_CKM_AES_ECB "CKM_AES_ECB"
#define SUN_CKM_AES_CTR "CKM_AES_CTR"
#define SUN_CKM_AES_CCM "CKM_AES_CCM"
#define SUN_CKM_AES_GCM "CKM_AES_GCM"
#define SUN_CKM_AES_GMAC "CKM_AES_GMAC"
#define SUN_CKM_AES_CFB128 "CKM_AES_CFB128"
#define SUN_CKM_RC4 "CKM_RC4"
#define SUN_CKM_RSA_PKCS "CKM_RSA_PKCS"
#define SUN_CKM_RSA_X_509 "CKM_RSA_X_509"
#define SUN_CKM_MD5_RSA_PKCS "CKM_MD5_RSA_PKCS"
#define SUN_CKM_SHA1_RSA_PKCS "CKM_SHA1_RSA_PKCS"
#define SUN_CKM_SHA256_RSA_PKCS "CKM_SHA256_RSA_PKCS"
#define SUN_CKM_SHA384_RSA_PKCS "CKM_SHA384_RSA_PKCS"
#define SUN_CKM_SHA512_RSA_PKCS "CKM_SHA512_RSA_PKCS"
#define SUN_CKM_EC_KEY_PAIR_GEN "CKM_EC_KEY_PAIR_GEN"
#define SUN_CKM_ECDH1_DERIVE "CKM_ECDH1_DERIVE"
#define SUN_CKM_ECDSA_SHA1 "CKM_ECDSA_SHA1"
#define SUN_CKM_ECDSA "CKM_ECDSA"
/* Shared operation context format for CKM_RC4 */
typedef struct {
#if defined(__amd64)
uint32_t i, j;
uint32_t arr[256];
uint32_t flag;
#else
uchar_t arr[256];
uchar_t i, j;
#endif /* __amd64 */
uint64_t pad; /* For 64-bit alignment */
} arcfour_state_t;
/* Data arguments of cryptographic operations */
typedef enum crypto_data_format {
CRYPTO_DATA_RAW = 1,
CRYPTO_DATA_UIO,
} crypto_data_format_t;
typedef struct crypto_data {
crypto_data_format_t cd_format; /* Format identifier */
off_t cd_offset; /* Offset from the beginning */
size_t cd_length; /* # of bytes in use */
caddr_t cd_miscdata; /* ancillary data */
union {
/* Raw format */
iovec_t cdu_raw; /* Pointer and length */
/* uio scatter-gather format */
uio_t *cdu_uio;
} cdu; /* Crypto Data Union */
} crypto_data_t;
#define cd_raw cdu.cdu_raw
#define cd_uio cdu.cdu_uio
#define cd_mp cdu.cdu_mp
typedef struct crypto_dual_data {
crypto_data_t dd_data; /* The data */
off_t dd_offset2; /* Used by dual operation */
size_t dd_len2; /* # of bytes to take */
} crypto_dual_data_t;
#define dd_format dd_data.cd_format
#define dd_offset1 dd_data.cd_offset
#define dd_len1 dd_data.cd_length
#define dd_miscdata dd_data.cd_miscdata
#define dd_raw dd_data.cd_raw
#define dd_uio dd_data.cd_uio
#define dd_mp dd_data.cd_mp
/* The keys, and their contents */
typedef enum {
CRYPTO_KEY_RAW = 1, /* ck_data is a cleartext key */
CRYPTO_KEY_REFERENCE, /* ck_obj_id is an opaque reference */
CRYPTO_KEY_ATTR_LIST /* ck_attrs is a list of object attributes */
} crypto_key_format_t;
typedef uint64_t crypto_attr_type_t;
/* Attribute types to use for passing a RSA public key or a private key. */
#define SUN_CKA_MODULUS 0x00000120
#define SUN_CKA_MODULUS_BITS 0x00000121
#define SUN_CKA_PUBLIC_EXPONENT 0x00000122
#define SUN_CKA_PRIVATE_EXPONENT 0x00000123
#define SUN_CKA_PRIME_1 0x00000124
#define SUN_CKA_PRIME_2 0x00000125
#define SUN_CKA_EXPONENT_1 0x00000126
#define SUN_CKA_EXPONENT_2 0x00000127
#define SUN_CKA_COEFFICIENT 0x00000128
#define SUN_CKA_PRIME 0x00000130
#define SUN_CKA_SUBPRIME 0x00000131
#define SUN_CKA_BASE 0x00000132
#define CKK_EC 0x00000003
#define CKK_GENERIC_SECRET 0x00000010
#define CKK_RC4 0x00000012
#define CKK_AES 0x0000001F
#define CKK_DES 0x00000013
#define CKK_DES2 0x00000014
#define CKK_DES3 0x00000015
#define CKO_PUBLIC_KEY 0x00000002
#define CKO_PRIVATE_KEY 0x00000003
#define CKA_CLASS 0x00000000
#define CKA_VALUE 0x00000011
#define CKA_KEY_TYPE 0x00000100
#define CKA_VALUE_LEN 0x00000161
#define CKA_EC_PARAMS 0x00000180
#define CKA_EC_POINT 0x00000181
typedef uint32_t crypto_object_id_t;
typedef struct crypto_object_attribute {
crypto_attr_type_t oa_type; /* attribute type */
caddr_t oa_value; /* attribute value */
ssize_t oa_value_len; /* length of attribute value */
} crypto_object_attribute_t;
/*
 * A cryptographic key.  Exactly one member of the cku_data union is
 * valid, selected by ck_format (see crypto_key_format_t above).
 */
typedef struct crypto_key {
	crypto_key_format_t	ck_format;	/* format identifier */
	union {
		/* for CRYPTO_KEY_RAW ck_format */
		struct {
			uint_t	cku_v_length;	/* # of bits in ck_data */
			void	*cku_v_data;	/* ptr to key value */
		} cku_key_value;

		/* for CRYPTO_KEY_REFERENCE ck_format */
		crypto_object_id_t cku_key_id;	/* reference to object key */

		/* for CRYPTO_KEY_ATTR_LIST ck_format */
		struct {
			uint_t cku_a_count;	/* number of attributes */
			crypto_object_attribute_t *cku_a_oattr;
		} cku_key_attrs;
	} cku_data;				/* Crypto Key union */
} crypto_key_t;
#ifdef _SYSCALL32
typedef struct crypto_object_attribute32 {
uint64_t oa_type; /* attribute type */
caddr32_t oa_value; /* attribute value */
ssize32_t oa_value_len; /* length of attribute value */
} crypto_object_attribute32_t;
typedef struct crypto_key32 {
crypto_key_format_t ck_format; /* format identifier */
union {
/* for CRYPTO_KEY_RAW ck_format */
struct {
uint32_t cku_v_length; /* # of bytes in ck_data */
caddr32_t cku_v_data; /* ptr to key value */
} cku_key_value;
/* for CRYPTO_KEY_REFERENCE ck_format */
crypto_object_id_t cku_key_id; /* reference to object key */
/* for CRYPTO_KEY_ATTR_LIST ck_format */
struct {
uint32_t cku_a_count; /* number of attributes */
caddr32_t cku_a_oattr;
} cku_key_attrs;
} cku_data; /* Crypto Key union */
} crypto_key32_t;
#endif /* _SYSCALL32 */
#define ck_data cku_data.cku_key_value.cku_v_data
#define ck_length cku_data.cku_key_value.cku_v_length
#define ck_obj_id cku_data.cku_key_id
#define ck_count cku_data.cku_key_attrs.cku_a_count
#define ck_attrs cku_data.cku_key_attrs.cku_a_oattr
/*
* Raw key lengths are expressed in number of bits.
* The following macro returns the minimum number of
* bytes that can contain the specified number of bits.
* Round up without overflowing the integer type.
*/
#define CRYPTO_BITS2BYTES(n) ((n) == 0 ? 0 : (((n) - 1) >> 3) + 1)
#define CRYPTO_BYTES2BITS(n) ((n) << 3)
/* Providers */
typedef enum {
CRYPTO_HW_PROVIDER = 0,
CRYPTO_SW_PROVIDER,
CRYPTO_LOGICAL_PROVIDER
} crypto_provider_type_t;
typedef uint32_t crypto_provider_id_t;
#define KCF_PROVID_INVALID ((uint32_t)-1)
typedef struct crypto_provider_entry {
crypto_provider_id_t pe_provider_id;
uint_t pe_mechanism_count;
} crypto_provider_entry_t;
typedef struct crypto_dev_list_entry {
char le_dev_name[MAXNAMELEN];
uint_t le_dev_instance;
uint_t le_mechanism_count;
} crypto_dev_list_entry_t;
/* User type for authentication ioctls and SPI entry points */
typedef enum crypto_user_type {
CRYPTO_SO = 0,
CRYPTO_USER
} crypto_user_type_t;
/* Version for provider management ioctls and SPI entry points */
typedef struct crypto_version {
uchar_t cv_major;
uchar_t cv_minor;
} crypto_version_t;
/* session data structure opaque to the consumer */
typedef void *crypto_session_t;
/* provider data structure opaque to the consumer */
typedef void *crypto_provider_t;
/* Limits used by both consumers and providers */
#define CRYPTO_EXT_SIZE_LABEL 32
#define CRYPTO_EXT_SIZE_MANUF 32
#define CRYPTO_EXT_SIZE_MODEL 16
#define CRYPTO_EXT_SIZE_SERIAL 16
#define CRYPTO_EXT_SIZE_TIME 16
typedef struct crypto_provider_ext_info {
uchar_t ei_label[CRYPTO_EXT_SIZE_LABEL];
uchar_t ei_manufacturerID[CRYPTO_EXT_SIZE_MANUF];
uchar_t ei_model[CRYPTO_EXT_SIZE_MODEL];
uchar_t ei_serial_number[CRYPTO_EXT_SIZE_SERIAL];
ulong_t ei_flags;
ulong_t ei_max_session_count;
ulong_t ei_max_pin_len;
ulong_t ei_min_pin_len;
ulong_t ei_total_public_memory;
ulong_t ei_free_public_memory;
ulong_t ei_total_private_memory;
ulong_t ei_free_private_memory;
crypto_version_t ei_hardware_version;
crypto_version_t ei_firmware_version;
uchar_t ei_time[CRYPTO_EXT_SIZE_TIME];
int ei_hash_max_input_len;
int ei_hmac_max_input_len;
} crypto_provider_ext_info_t;
typedef uint_t crypto_session_id_t;
typedef enum cmd_type {
COPY_FROM_DATA,
COPY_TO_DATA,
COMPARE_TO_DATA,
MD5_DIGEST_DATA,
SHA1_DIGEST_DATA,
SHA2_DIGEST_DATA,
GHASH_DATA
} cmd_type_t;
#define CRYPTO_DO_UPDATE 0x01
#define CRYPTO_DO_FINAL 0x02
#define CRYPTO_DO_MD5 0x04
#define CRYPTO_DO_SHA1 0x08
#define CRYPTO_DO_SIGN 0x10
#define CRYPTO_DO_VERIFY 0x20
#define CRYPTO_DO_SHA2 0x40
#define PROVIDER_OWNS_KEY_SCHEDULE 0x00000001
/*
* Common cryptographic status and error codes.
*/
#define CRYPTO_SUCCESS 0x00000000
#define CRYPTO_CANCEL 0x00000001
#define CRYPTO_HOST_MEMORY 0x00000002
#define CRYPTO_GENERAL_ERROR 0x00000003
#define CRYPTO_FAILED 0x00000004
#define CRYPTO_ARGUMENTS_BAD 0x00000005
#define CRYPTO_ATTRIBUTE_READ_ONLY 0x00000006
#define CRYPTO_ATTRIBUTE_SENSITIVE 0x00000007
#define CRYPTO_ATTRIBUTE_TYPE_INVALID 0x00000008
#define CRYPTO_ATTRIBUTE_VALUE_INVALID 0x00000009
#define CRYPTO_CANCELED 0x0000000A
#define CRYPTO_DATA_INVALID 0x0000000B
#define CRYPTO_DATA_LEN_RANGE 0x0000000C
#define CRYPTO_DEVICE_ERROR 0x0000000D
#define CRYPTO_DEVICE_MEMORY 0x0000000E
#define CRYPTO_DEVICE_REMOVED 0x0000000F
#define CRYPTO_ENCRYPTED_DATA_INVALID 0x00000010
#define CRYPTO_ENCRYPTED_DATA_LEN_RANGE 0x00000011
#define CRYPTO_KEY_HANDLE_INVALID 0x00000012
#define CRYPTO_KEY_SIZE_RANGE 0x00000013
#define CRYPTO_KEY_TYPE_INCONSISTENT 0x00000014
#define CRYPTO_KEY_NOT_NEEDED 0x00000015
#define CRYPTO_KEY_CHANGED 0x00000016
#define CRYPTO_KEY_NEEDED 0x00000017
#define CRYPTO_KEY_INDIGESTIBLE 0x00000018
#define CRYPTO_KEY_FUNCTION_NOT_PERMITTED 0x00000019
#define CRYPTO_KEY_NOT_WRAPPABLE 0x0000001A
#define CRYPTO_KEY_UNEXTRACTABLE 0x0000001B
#define CRYPTO_MECHANISM_INVALID 0x0000001C
#define CRYPTO_MECHANISM_PARAM_INVALID 0x0000001D
#define CRYPTO_OBJECT_HANDLE_INVALID 0x0000001E
#define CRYPTO_OPERATION_IS_ACTIVE 0x0000001F
#define CRYPTO_OPERATION_NOT_INITIALIZED 0x00000020
#define CRYPTO_PIN_INCORRECT 0x00000021
#define CRYPTO_PIN_INVALID 0x00000022
#define CRYPTO_PIN_LEN_RANGE 0x00000023
#define CRYPTO_PIN_EXPIRED 0x00000024
#define CRYPTO_PIN_LOCKED 0x00000025
#define CRYPTO_SESSION_CLOSED 0x00000026
#define CRYPTO_SESSION_COUNT 0x00000027
#define CRYPTO_SESSION_HANDLE_INVALID 0x00000028
#define CRYPTO_SESSION_READ_ONLY 0x00000029
#define CRYPTO_SESSION_EXISTS 0x0000002A
#define CRYPTO_SESSION_READ_ONLY_EXISTS 0x0000002B
#define CRYPTO_SESSION_READ_WRITE_SO_EXISTS 0x0000002C
#define CRYPTO_SIGNATURE_INVALID 0x0000002D
#define CRYPTO_SIGNATURE_LEN_RANGE 0x0000002E
#define CRYPTO_TEMPLATE_INCOMPLETE 0x0000002F
#define CRYPTO_TEMPLATE_INCONSISTENT 0x00000030
#define CRYPTO_UNWRAPPING_KEY_HANDLE_INVALID 0x00000031
#define CRYPTO_UNWRAPPING_KEY_SIZE_RANGE 0x00000032
#define CRYPTO_UNWRAPPING_KEY_TYPE_INCONSISTENT 0x00000033
#define CRYPTO_USER_ALREADY_LOGGED_IN 0x00000034
#define CRYPTO_USER_NOT_LOGGED_IN 0x00000035
#define CRYPTO_USER_PIN_NOT_INITIALIZED 0x00000036
#define CRYPTO_USER_TYPE_INVALID 0x00000037
#define CRYPTO_USER_ANOTHER_ALREADY_LOGGED_IN 0x00000038
#define CRYPTO_USER_TOO_MANY_TYPES 0x00000039
#define CRYPTO_WRAPPED_KEY_INVALID 0x0000003A
#define CRYPTO_WRAPPED_KEY_LEN_RANGE 0x0000003B
#define CRYPTO_WRAPPING_KEY_HANDLE_INVALID 0x0000003C
#define CRYPTO_WRAPPING_KEY_SIZE_RANGE 0x0000003D
#define CRYPTO_WRAPPING_KEY_TYPE_INCONSISTENT 0x0000003E
#define CRYPTO_RANDOM_SEED_NOT_SUPPORTED 0x0000003F
#define CRYPTO_RANDOM_NO_RNG 0x00000040
#define CRYPTO_DOMAIN_PARAMS_INVALID 0x00000041
#define CRYPTO_BUFFER_TOO_SMALL 0x00000042
#define CRYPTO_INFORMATION_SENSITIVE 0x00000043
#define CRYPTO_NOT_SUPPORTED 0x00000044
#define CRYPTO_QUEUED 0x00000045
#define CRYPTO_BUFFER_TOO_BIG 0x00000046
#define CRYPTO_INVALID_CONTEXT 0x00000047
#define CRYPTO_INVALID_MAC 0x00000048
#define CRYPTO_MECH_NOT_SUPPORTED 0x00000049
#define CRYPTO_INCONSISTENT_ATTRIBUTE 0x0000004A
#define CRYPTO_NO_PERMISSION 0x0000004B
#define CRYPTO_INVALID_PROVIDER_ID 0x0000004C
#define CRYPTO_VERSION_MISMATCH 0x0000004D
#define CRYPTO_BUSY 0x0000004E
#define CRYPTO_UNKNOWN_PROVIDER 0x0000004F
#define CRYPTO_MODVERIFICATION_FAILED 0x00000050
#define CRYPTO_OLD_CTX_TEMPLATE 0x00000051
#define CRYPTO_WEAK_KEY 0x00000052
#define CRYPTO_FIPS140_ERROR 0x00000053
/*
 * Don't forget to update CRYPTO_LAST_ERROR and the error_number_table[]
 * in kernelUtil.c when a new error code is added.
 */
#define CRYPTO_LAST_ERROR 0x00000053
/*
 * Special values that can be used to indicate that information is unavailable
 * or that there is no practical limit. These values can be used
 * by fields of the SPI crypto_provider_ext_info(9S) structure.
 * The value of CRYPTO_UNAVAILABLE_INFO should be the same as
 * CK_UNAVAILABLE_INFO in the PKCS#11 spec.
 */
#define CRYPTO_UNAVAILABLE_INFO ((ulong_t)(-1))
#define CRYPTO_EFFECTIVELY_INFINITE 0x0
#ifdef __cplusplus
}
#endif
#endif /* _SYS_CRYPTO_COMMON_H */

41
include/sys/crypto/icp.h Normal file
View File

@ -0,0 +1,41 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2016, Datto, Inc. All rights reserved.
*/
#ifndef	_SYS_CRYPTO_ICP_H
#define	_SYS_CRYPTO_ICP_H

/*
 * Entry points for bringing the Illumos Crypto Framework (ICP) and its
 * bundled providers up and down.  The guard was renamed from
 * _SYS_CRYPTO_ALGS_H to match this header's name (icp.h) and avoid
 * colliding with a real algs.h guard.
 */

/* Per-provider module init/fini hooks (return crypto error codes). */
int aes_mod_init(void);
int aes_mod_fini(void);

int sha1_mod_init(void);
int sha1_mod_fini(void);

int sha2_mod_init(void);
int sha2_mod_fini(void);

/* Framework-wide init/teardown; icp_init() registers all providers. */
int icp_init(void);
void icp_fini(void);

#endif	/* _SYS_CRYPTO_ICP_H */

View File

@ -62,6 +62,7 @@
#include <vm/seg_kmem.h>
#include <sys/zone.h>
#include <sys/sdt.h>
#include <sys/kstat.h>
#include <sys/zfs_debug.h>
#include <sys/zfs_delay.h>
#include <sys/fm/fs/zfs.h>
@ -445,7 +446,9 @@ typedef enum kmem_cbrc {
/*
* Task queues
*/
typedef struct taskq taskq_t;
#define TASKQ_NAMELEN 31
typedef uintptr_t taskqid_t;
typedef void (task_func_t)(void *);
@ -457,6 +460,25 @@ typedef struct taskq_ent {
uintptr_t tqent_flags;
} taskq_ent_t;
typedef struct taskq {
char tq_name[TASKQ_NAMELEN + 1];
kmutex_t tq_lock;
krwlock_t tq_threadlock;
kcondvar_t tq_dispatch_cv;
kcondvar_t tq_wait_cv;
kthread_t **tq_threadlist;
int tq_flags;
int tq_active;
int tq_nthreads;
int tq_nalloc;
int tq_minalloc;
int tq_maxalloc;
kcondvar_t tq_maxalloc_cv;
int tq_maxalloc_wait;
taskq_ent_t *tq_freelist;
taskq_ent_t tq_task;
} taskq_t;
#define TQENT_FLAG_PREALLOC 0x1 /* taskq_dispatch_ent used */
#define TASKQ_PREPOPULATE 0x0001
@ -651,6 +673,8 @@ extern uint64_t physmem;
extern int highbit64(uint64_t i);
extern int lowbit64(uint64_t i);
extern int highbit(ulong_t i);
extern int lowbit(ulong_t i);
extern int random_get_bytes(uint8_t *ptr, size_t len);
extern int random_get_pseudo_bytes(uint8_t *ptr, size_t len);
@ -658,6 +682,8 @@ extern void kernel_init(int);
extern void kernel_fini(void);
extern void thread_init(void);
extern void thread_fini(void);
extern void random_init(void);
extern void random_fini(void);
struct spa;
extern void nicenum(uint64_t num, char *buf);

View File

@ -1,7 +1,7 @@
# NB: GNU Automake Manual, Chapter 8.3.5: Libtool Convenience Libraries
# These five libraries are intermediary build components.
SUBDIRS = libspl libavl libefi libshare libunicode
# These six libraries are intermediary build components.
SUBDIRS = libspl libavl libefi libshare libunicode libicp
# These four libraries, which are installed as the final build product,
# incorporate the five convenience libraries given above.
# incorporate the six convenience libraries given above.
SUBDIRS += libuutil libnvpair libzpool libzfs_core libzfs

78
lib/libicp/Makefile.am Normal file
View File

@ -0,0 +1,78 @@
include $(top_srcdir)/config/Rules.am
VPATH = \
$(top_srcdir)/module/icp \
$(top_srcdir)/lib/libicp
AM_CFLAGS += $(DEBUG_STACKFLAGS) $(FRAME_LARGER_THAN)
DEFAULT_INCLUDES += \
-I$(top_srcdir)/include \
-I$(top_srcdir)/module/icp/include \
-I$(top_srcdir)/lib/libspl/include
noinst_LTLIBRARIES = libicp.la
if TARGET_ASM_X86_64
ASM_SOURCES_C = asm-x86_64/aes/aeskey.c
ASM_SOURCES_AS = \
asm-x86_64/aes/aes_amd64.S \
asm-x86_64/aes/aes_intel.S \
asm-x86_64/modes/gcm_intel.S \
asm-x86_64/sha1/sha1-x86_64.S \
asm-x86_64/sha2/sha256_impl.S
endif
if TARGET_ASM_I386
ASM_SOURCES_C =
ASM_SOURCES_AS =
endif
if TARGET_ASM_GENERIC
ASM_SOURCES_C =
ASM_SOURCES_AS =
endif
USER_C =
USER_ASM =
KERNEL_C = \
spi/kcf_spi.c \
api/kcf_ctxops.c \
api/kcf_digest.c \
api/kcf_cipher.c \
api/kcf_miscapi.c \
api/kcf_mac.c \
algs/aes/aes_impl.c \
algs/aes/aes_modes.c \
algs/modes/modes.c \
algs/modes/cbc.c \
algs/modes/gcm.c \
algs/modes/ctr.c \
algs/modes/ccm.c \
algs/modes/ecb.c \
algs/sha1/sha1.c \
algs/sha2/sha2.c \
illumos-crypto.c \
io/aes.c \
io/sha1_mod.c \
io/sha2_mod.c \
os/modhash.c \
os/modconf.c \
core/kcf_sched.c \
core/kcf_prov_lib.c \
core/kcf_callprov.c \
core/kcf_mech_tabs.c \
core/kcf_prov_tabs.c \
$(ASM_SOURCES_C)
KERNEL_ASM = $(ASM_SOURCES_AS)
nodist_libicp_la_SOURCES = \
$(USER_C) \
$(USER_ASM) \
$(KERNEL_C) \
$(KERNEL_ASM)
libicp_la_LIBADD = -lrt

View File

@ -128,6 +128,28 @@ extern in_port_t ntohs(in_port_t);
#define BE_64(x) BSWAP_64(x)
#endif
#ifdef _BIG_ENDIAN
/* Big-endian hosts: network byte order is the native order — no-op. */
static __inline__ uint64_t
htonll(uint64_t n) {
	return (n);
}

static __inline__ uint64_t
ntohll(uint64_t n) {
	return (n);
}
#else
/*
 * Little-endian hosts: byte-swap the 64-bit value by converting each
 * 32-bit half with htonl()/ntohl() and exchanging the halves.
 */
static __inline__ uint64_t
htonll(uint64_t n) {
	return ((((uint64_t)htonl(n)) << 32) + htonl(n >> 32));
}

static __inline__ uint64_t
ntohll(uint64_t n) {
	return ((((uint64_t)ntohl(n)) << 32) + ntohl(n >> 32));
}
#endif
/*
* Macros to read unaligned values from a specific byte order to
* native byte order

View File

@ -33,7 +33,7 @@
#define FREAD 1
#define FWRITE 2
// #define FAPPEND 8
// #define FAPPEND 8
#define FCREAT O_CREAT
#define FTRUNC O_TRUNC

View File

@ -64,6 +64,7 @@ typedef int major_t;
typedef int minor_t;
typedef ushort_t o_mode_t; /* old file attribute type */
typedef short index_t;
/*
* Definitions remaining from previous partial support for 64-bit file

View File

@ -126,7 +126,8 @@ nodist_libzpool_la_SOURCES = \
libzpool_la_LIBADD = \
$(top_builddir)/lib/libunicode/libunicode.la \
$(top_builddir)/lib/libuutil/libuutil.la \
$(top_builddir)/lib/libnvpair/libnvpair.la
$(top_builddir)/lib/libnvpair/libnvpair.la \
$(top_builddir)/lib/libicp/libicp.la
libzpool_la_LIBADD += $(ZLIB)
libzpool_la_LDFLAGS = -version-info 2:0:0

View File

@ -41,6 +41,7 @@
#include <sys/time.h>
#include <sys/systeminfo.h>
#include <zfs_fletcher.h>
#include <sys/crypto/icp.h>
/*
* Emulation of kernel services in userland.
@ -1113,9 +1114,96 @@ lowbit64(uint64_t i)
return (h);
}
/*
 * Find highest one bit set.
 * Returns bit number + 1 of highest bit that is set, otherwise returns 0.
 * High order bit is 31 (or 63 in _LP64 kernel).
 *
 * Binary search: at each step, if any bit lives in the upper half of
 * the remaining width, record that and shift it down.
 */
int
highbit(ulong_t i)
{
	int pos = 1;
	int shift;

	if (i == 0)
		return (0);

#ifdef _LP64
	for (shift = 32; shift >= 1; shift >>= 1) {
#else
	for (shift = 16; shift >= 1; shift >>= 1) {
#endif
		/* Mask covering bits [shift, 2*shift). */
		if (i & ((((ulong_t)1 << shift) - 1) << shift)) {
			pos += shift;
			i >>= shift;
		}
	}
	return (pos);
}
/*
 * Find lowest one bit set.
 * Returns bit number + 1 of lowest bit that is set, otherwise returns 0.
 * Low order bit is 0.
 *
 * Binary search: at each step, if the low half of the remaining width
 * is all zero, the lowest set bit must be above it — skip past it.
 */
int
lowbit(ulong_t i)
{
	int pos = 1;
	int shift;

	if (i == 0)
		return (0);

#ifdef _LP64
	for (shift = 32; shift >= 1; shift >>= 1) {
#else
	for (shift = 16; shift >= 1; shift >>= 1) {
#endif
		/* Mask covering the low `shift` bits. */
		if (!(i & (((ulong_t)1 << shift) - 1))) {
			pos += shift;
			i >>= shift;
		}
	}
	return (pos);
}
static int random_fd = -1, urandom_fd = -1;
/*
 * Open the entropy devices backing random_get_bytes() and
 * random_get_pseudo_bytes().  Aborts (via VERIFY) if either device
 * cannot be opened.
 */
void
random_init(void)
{
	random_fd = open("/dev/random", O_RDONLY);
	VERIFY(random_fd != -1);

	urandom_fd = open("/dev/urandom", O_RDONLY);
	VERIFY(urandom_fd != -1);
}
/* Close the entropy devices and mark both descriptors invalid. */
void
random_fini(void)
{
	(void) close(random_fd);
	(void) close(urandom_fd);

	random_fd = -1;
	urandom_fd = -1;
}
static int
random_get_bytes_common(uint8_t *ptr, size_t len, int fd)
{
@ -1228,12 +1316,13 @@ kernel_init(int mode)
(void) snprintf(hw_serial, sizeof (hw_serial), "%ld",
(mode & FWRITE) ? get_system_hostid() : 0);
VERIFY((random_fd = open("/dev/random", O_RDONLY)) != -1);
VERIFY((urandom_fd = open("/dev/urandom", O_RDONLY)) != -1);
random_init();
VERIFY0(uname(&hw_utsname));
thread_init();
system_taskq_init();
icp_init();
spa_init(mode);
@ -1248,14 +1337,11 @@ kernel_fini(void)
fletcher_4_fini();
spa_fini();
icp_fini();
system_taskq_fini();
thread_fini();
close(random_fd);
close(urandom_fd);
random_fd = -1;
urandom_fd = -1;
random_fini();
}
uid_t

View File

@ -34,26 +34,6 @@ int taskq_now;
taskq_t *system_taskq;
#define TASKQ_ACTIVE 0x00010000
#define TASKQ_NAMELEN 31
struct taskq {
char tq_name[TASKQ_NAMELEN + 1];
kmutex_t tq_lock;
krwlock_t tq_threadlock;
kcondvar_t tq_dispatch_cv;
kcondvar_t tq_wait_cv;
kthread_t **tq_threadlist;
int tq_flags;
int tq_active;
int tq_nthreads;
int tq_nalloc;
int tq_minalloc;
int tq_maxalloc;
kcondvar_t tq_maxalloc_cv;
int tq_maxalloc_wait;
taskq_ent_t *tq_freelist;
taskq_ent_t tq_task;
};
static taskq_ent_t *
task_alloc(taskq_t *tq, int tqflags)

View File

@ -4,6 +4,7 @@ subdir-m += unicode
subdir-m += zcommon
subdir-m += zfs
subdir-m += zpios
subdir-m += icp
INSTALL_MOD_DIR ?= extra
@ -12,6 +13,8 @@ ZFS_MODULE_CFLAGS += -include @abs_top_builddir@/zfs_config.h
ZFS_MODULE_CFLAGS += -I@abs_top_srcdir@/include -I@SPL@/include -I@SPL@
export ZFS_MODULE_CFLAGS
SUBDIR_TARGETS = icp
modules:
@# Make the exported SPL symbols available to these modules.
@# They may be in the root of SPL_OBJ when building against
@ -28,6 +31,9 @@ modules:
"*** - @SPL_OBJ@/module/@SPL_SYMBOLS@\n"; \
exit 1; \
fi
list='$(SUBDIR_TARGETS)'; for targetdir in $$list; do \
$(MAKE) -C $$targetdir; \
done
$(MAKE) -C @LINUX_OBJ@ SUBDIRS=`pwd` @KERNELMAKE_PARAMS@ CONFIG_ZFS=m $@
clean:
@ -64,8 +70,8 @@ modules_uninstall:
distdir:
list='$(subdir-m)'; for subdir in $$list; do \
(find @top_srcdir@/module/$$subdir -name '*.c' -o -name '*.h' |\
xargs /bin/cp -t $$distdir/$$subdir); \
(cd @top_srcdir@/module && find $$subdir -name '*.c' -o -name '*.h' -o -name '*.S' |\
xargs /bin/cp --parents -t $$distdir); \
done
distclean maintainer-clean: clean

82
module/icp/Makefile.in Normal file
View File

@ -0,0 +1,82 @@
src = @abs_top_srcdir@/module/icp
obj = @abs_builddir@
MODULE := icp
TARGET_ASM_DIR = @TARGET_ASM_DIR@
ifeq ($(TARGET_ASM_DIR), asm-x86_64)
ASM_SOURCES := asm-x86_64/aes/aeskey.o
ASM_SOURCES += asm-x86_64/aes/aes_amd64.o
ASM_SOURCES += asm-x86_64/aes/aes_intel.o
ASM_SOURCES += asm-x86_64/modes/gcm_intel.o
ASM_SOURCES += asm-x86_64/sha1/sha1-x86_64.o
ASM_SOURCES += asm-x86_64/sha2/sha256_impl.o
endif
ifeq ($(TARGET_ASM_DIR), asm-i386)
ASM_SOURCES :=
endif
ifeq ($(TARGET_ASM_DIR), asm-generic)
ASM_SOURCES :=
endif
EXTRA_CFLAGS = $(ZFS_MODULE_CFLAGS) @KERNELCPPFLAGS@
obj-$(CONFIG_ZFS) := $(MODULE).o
ccflags-y += -I$(src)/include
asflags-y += -I$(src)/include
asflags-y += $(ZFS_MODULE_CFLAGS)
$(MODULE)-objs += illumos-crypto.o
$(MODULE)-objs += api/kcf_cipher.o
$(MODULE)-objs += api/kcf_digest.o
$(MODULE)-objs += api/kcf_mac.o
$(MODULE)-objs += api/kcf_miscapi.o
$(MODULE)-objs += api/kcf_ctxops.o
$(MODULE)-objs += core/kcf_callprov.o
$(MODULE)-objs += core/kcf_prov_tabs.o
$(MODULE)-objs += core/kcf_sched.o
$(MODULE)-objs += core/kcf_mech_tabs.o
$(MODULE)-objs += core/kcf_prov_lib.o
$(MODULE)-objs += spi/kcf_spi.o
$(MODULE)-objs += io/aes.o
$(MODULE)-objs += io/sha1_mod.o
$(MODULE)-objs += io/sha2_mod.o
$(MODULE)-objs += os/modhash.o
$(MODULE)-objs += os/modconf.o
$(MODULE)-objs += algs/modes/cbc.o
$(MODULE)-objs += algs/modes/ccm.o
$(MODULE)-objs += algs/modes/ctr.o
$(MODULE)-objs += algs/modes/ecb.o
$(MODULE)-objs += algs/modes/gcm.o
$(MODULE)-objs += algs/modes/modes.o
$(MODULE)-objs += algs/aes/aes_impl.o
$(MODULE)-objs += algs/aes/aes_modes.o
$(MODULE)-objs += algs/sha1/sha1.o
$(MODULE)-objs += algs/sha2/sha2.o
$(MODULE)-objs += $(ASM_SOURCES)
ICP_DIRS = \
api \
core \
spi \
io \
os \
algs \
algs/aes \
algs/modes \
algs/sha1 \
algs/sha2 \
asm-x86_64 \
asm-x86_64/aes \
asm-x86_64/modes \
asm-x86_64/sha1 \
asm-x86_64/sha2 \
asm-i386 \
asm-generic
all:
mkdir -p $(ICP_DIRS)

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,135 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <sys/zfs_context.h>
#include <modes/modes.h>
#include <aes/aes_impl.h>
/*
 * Copy a 16-byte AES block from "in" to "out".
 *
 * When both pointers are 32-bit aligned, copy as four 32-bit words;
 * otherwise fall back to the byte-wise AES_COPY_BLOCK() macro.
 */
void
aes_copy_block(uint8_t *in, uint8_t *out)
{
	if (IS_P2ALIGNED2(in, out, sizeof (uint32_t))) {
		/* LINTED: pointer alignment */
		*(uint32_t *)&out[0] = *(uint32_t *)&in[0];
		/* LINTED: pointer alignment */
		*(uint32_t *)&out[4] = *(uint32_t *)&in[4];
		/* LINTED: pointer alignment */
		*(uint32_t *)&out[8] = *(uint32_t *)&in[8];
		/* LINTED: pointer alignment */
		*(uint32_t *)&out[12] = *(uint32_t *)&in[12];
	} else {
		AES_COPY_BLOCK(in, out);
	}
}
/*
 * XOR a 16-byte AES block of "data" into "dst" (dst ^= data).
 *
 * When both pointers are 32-bit aligned, XOR as four 32-bit words;
 * otherwise fall back to the byte-wise AES_XOR_BLOCK() macro.
 */
void
aes_xor_block(uint8_t *data, uint8_t *dst)
{
	if (IS_P2ALIGNED2(dst, data, sizeof (uint32_t))) {
		/* LINTED: pointer alignment */
		*(uint32_t *)&dst[0] ^= *(uint32_t *)&data[0];
		/* LINTED: pointer alignment */
		*(uint32_t *)&dst[4] ^= *(uint32_t *)&data[4];
		/* LINTED: pointer alignment */
		*(uint32_t *)&dst[8] ^= *(uint32_t *)&data[8];
		/* LINTED: pointer alignment */
		*(uint32_t *)&dst[12] ^= *(uint32_t *)&data[12];
	} else {
		AES_XOR_BLOCK(data, dst);
	}
}
/*
 * Encrypt multiple blocks of data according to mode.
 *
 * Dispatches on the mode bits in ac_flags to the matching
 * mode-implementation routine, passing the AES block primitives
 * (aes_encrypt_block, aes_copy_block, aes_xor_block) as callbacks.
 * Falls through to ECB when no other mode flag is set.
 * Returns the CRYPTO_* status from the mode routine.
 */
int
aes_encrypt_contiguous_blocks(void *ctx, char *data, size_t length,
    crypto_data_t *out)
{
	aes_ctx_t *aes_ctx = ctx;
	int rv;

	if (aes_ctx->ac_flags & CTR_MODE) {
		rv = ctr_mode_contiguous_blocks(ctx, data, length, out,
		    AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
	} else if (aes_ctx->ac_flags & CCM_MODE) {
		rv = ccm_mode_encrypt_contiguous_blocks(ctx, data, length,
		    out, AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
		    aes_xor_block);
	} else if (aes_ctx->ac_flags & (GCM_MODE|GMAC_MODE)) {
		rv = gcm_mode_encrypt_contiguous_blocks(ctx, data, length,
		    out, AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
		    aes_xor_block);
	} else if (aes_ctx->ac_flags & CBC_MODE) {
		rv = cbc_encrypt_contiguous_blocks(ctx,
		    data, length, out, AES_BLOCK_LEN, aes_encrypt_block,
		    aes_copy_block, aes_xor_block);
	} else {
		rv = ecb_cipher_contiguous_blocks(ctx, data, length, out,
		    AES_BLOCK_LEN, aes_encrypt_block);
	}
	return (rv);
}
/*
 * Decrypt multiple blocks of data according to mode.
 *
 * Dispatches on the mode bits in ac_flags, as the encrypt path does.
 * Note that CTR (and GCM/CCM internally) still use aes_encrypt_block,
 * since counter-style modes decrypt with the forward cipher.
 * CRYPTO_DATA_LEN_RANGE is remapped to CRYPTO_ENCRYPTED_DATA_LEN_RANGE
 * for CTR/ECB, since the input here is ciphertext.
 */
int
aes_decrypt_contiguous_blocks(void *ctx, char *data, size_t length,
    crypto_data_t *out)
{
	aes_ctx_t *aes_ctx = ctx;
	int rv;

	if (aes_ctx->ac_flags & CTR_MODE) {
		rv = ctr_mode_contiguous_blocks(ctx, data, length, out,
		    AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
		if (rv == CRYPTO_DATA_LEN_RANGE)
			rv = CRYPTO_ENCRYPTED_DATA_LEN_RANGE;
	} else if (aes_ctx->ac_flags & CCM_MODE) {
		rv = ccm_mode_decrypt_contiguous_blocks(ctx, data, length,
		    out, AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
		    aes_xor_block);
	} else if (aes_ctx->ac_flags & (GCM_MODE|GMAC_MODE)) {
		rv = gcm_mode_decrypt_contiguous_blocks(ctx, data, length,
		    out, AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
		    aes_xor_block);
	} else if (aes_ctx->ac_flags & CBC_MODE) {
		rv = cbc_decrypt_contiguous_blocks(ctx, data, length, out,
		    AES_BLOCK_LEN, aes_decrypt_block, aes_copy_block,
		    aes_xor_block);
	} else {
		rv = ecb_cipher_contiguous_blocks(ctx, data, length, out,
		    AES_BLOCK_LEN, aes_decrypt_block);
		if (rv == CRYPTO_DATA_LEN_RANGE)
			rv = CRYPTO_ENCRYPTED_DATA_LEN_RANGE;
	}
	return (rv);
}

305
module/icp/algs/modes/cbc.c Normal file
View File

@ -0,0 +1,305 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <sys/zfs_context.h>
#include <modes/modes.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
/*
 * Algorithm independent CBC functions.
 */

/*
 * Encrypt "length" bytes of "data" in CBC mode, carrying sub-block
 * leftovers across calls in ctx->cbc_remainder.
 *
 * If out == NULL, encryption is done in place over the caller's buffer;
 * otherwise ciphertext is written to "out" via the crypto_get_ptrs()
 * scatter/gather helpers.  The block cipher and block copy/xor
 * primitives are supplied as callbacks so any block_size cipher works.
 * Returns CRYPTO_SUCCESS, or CRYPTO_DATA_LEN_RANGE if a stored
 * remainder cannot be completed from this call's data.
 */
int
cbc_encrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
    crypto_data_t *out, size_t block_size,
    int (*encrypt)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	size_t remainder = length;
	size_t need = 0;
	uint8_t *datap = (uint8_t *)data;
	uint8_t *blockp;
	uint8_t *lastp;		/* previous ciphertext block (or IV) */
	void *iov_or_mp;
	offset_t offset;
	uint8_t *out_data_1;
	uint8_t *out_data_2;
	size_t out_data_1_len;

	/* Not enough for a full block yet: stash the bytes and return. */
	if (length + ctx->cbc_remainder_len < block_size) {
		/* accumulate bytes here and return */
		bcopy(datap,
		    (uint8_t *)ctx->cbc_remainder + ctx->cbc_remainder_len,
		    length);
		ctx->cbc_remainder_len += length;
		ctx->cbc_copy_to = datap;
		return (CRYPTO_SUCCESS);
	}

	lastp = (uint8_t *)ctx->cbc_iv;
	if (out != NULL)
		crypto_init_ptrs(out, &iov_or_mp, &offset);

	do {
		/* Unprocessed data from last call. */
		if (ctx->cbc_remainder_len > 0) {
			/* Top up the stored partial block from datap. */
			need = block_size - ctx->cbc_remainder_len;

			if (need > remainder)
				return (CRYPTO_DATA_LEN_RANGE);

			bcopy(datap, &((uint8_t *)ctx->cbc_remainder)
			    [ctx->cbc_remainder_len], need);

			blockp = (uint8_t *)ctx->cbc_remainder;
		} else {
			blockp = datap;
		}

		if (out == NULL) {
			/* In-place: encrypt directly over the input block. */
			/*
			 * XOR the previous cipher block or IV with the
			 * current clear block.
			 */
			xor_block(lastp, blockp);
			encrypt(ctx->cbc_keysched, blockp, blockp);

			ctx->cbc_lastp = blockp;
			lastp = blockp;

			/*
			 * If this block was assembled from a remainder,
			 * write the ciphertext back over the original
			 * fragment locations.
			 */
			if (ctx->cbc_remainder_len > 0) {
				bcopy(blockp, ctx->cbc_copy_to,
				    ctx->cbc_remainder_len);
				bcopy(blockp + ctx->cbc_remainder_len, datap,
				    need);
			}
		} else {
			/*
			 * XOR the previous cipher block or IV with the
			 * current clear block.
			 */
			xor_block(blockp, lastp);
			encrypt(ctx->cbc_keysched, lastp, lastp);
			crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
			    &out_data_1_len, &out_data_2, block_size);

			/* copy block to where it belongs */
			if (out_data_1_len == block_size) {
				copy_block(lastp, out_data_1);
			} else {
				/* Ciphertext straddles two output segments. */
				bcopy(lastp, out_data_1, out_data_1_len);
				if (out_data_2 != NULL) {
					bcopy(lastp + out_data_1_len,
					    out_data_2,
					    block_size - out_data_1_len);
				}
			}
			/* update offset */
			out->cd_offset += block_size;
		}

		/* Update pointer to next block of data to be processed. */
		if (ctx->cbc_remainder_len != 0) {
			datap += need;
			ctx->cbc_remainder_len = 0;
		} else {
			datap += block_size;
		}

		remainder = (size_t)&data[length] - (size_t)datap;

		/* Incomplete last block. */
		if (remainder > 0 && remainder < block_size) {
			bcopy(datap, ctx->cbc_remainder, remainder);
			ctx->cbc_remainder_len = remainder;
			ctx->cbc_copy_to = datap;
			goto out;
		}
		ctx->cbc_copy_to = NULL;

	} while (remainder > 0);

out:
	/*
	 * Save the last encrypted block in the context.
	 */
	if (ctx->cbc_lastp != NULL) {
		copy_block((uint8_t *)ctx->cbc_lastp, (uint8_t *)ctx->cbc_iv);
		ctx->cbc_lastp = (uint8_t *)ctx->cbc_iv;
	}

	return (CRYPTO_SUCCESS);
}
/*
 * Alternate between the two context scratch blocks (cbc_lastblock and
 * cbc_iv), so the previous ciphertext block survives while the current
 * one is being saved.
 */
#define	OTHER(a, ctx) \
	(((a) == (ctx)->cbc_lastblock) ? (ctx)->cbc_iv : (ctx)->cbc_lastblock)

/*
 * Decrypt "length" bytes of ciphertext in CBC mode, carrying sub-block
 * leftovers across calls in ctx->cbc_remainder.
 *
 * If out == NULL, decryption is done in place; otherwise plaintext is
 * written to "out" via the crypto_get_ptrs() scatter/gather helpers.
 * Each ciphertext block is saved (via OTHER()) before decryption so it
 * can serve as the XOR input for the next block.  Returns
 * CRYPTO_SUCCESS, or CRYPTO_ENCRYPTED_DATA_LEN_RANGE if a stored
 * remainder cannot be completed from this call's data.
 */
/* ARGSUSED */
int
cbc_decrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
    crypto_data_t *out, size_t block_size,
    int (*decrypt)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	size_t remainder = length;
	size_t need = 0;
	uint8_t *datap = (uint8_t *)data;
	uint8_t *blockp;
	uint8_t *lastp;		/* previous ciphertext block (or IV) */
	void *iov_or_mp;
	offset_t offset;
	uint8_t *out_data_1;
	uint8_t *out_data_2;
	size_t out_data_1_len;

	/* Not enough for a full block yet: stash the bytes and return. */
	if (length + ctx->cbc_remainder_len < block_size) {
		/* accumulate bytes here and return */
		bcopy(datap,
		    (uint8_t *)ctx->cbc_remainder + ctx->cbc_remainder_len,
		    length);
		ctx->cbc_remainder_len += length;
		ctx->cbc_copy_to = datap;
		return (CRYPTO_SUCCESS);
	}

	lastp = ctx->cbc_lastp;
	if (out != NULL)
		crypto_init_ptrs(out, &iov_or_mp, &offset);

	do {
		/* Unprocessed data from last call. */
		if (ctx->cbc_remainder_len > 0) {
			/* Top up the stored partial block from datap. */
			need = block_size - ctx->cbc_remainder_len;

			if (need > remainder)
				return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);

			bcopy(datap, &((uint8_t *)ctx->cbc_remainder)
			    [ctx->cbc_remainder_len], need);

			blockp = (uint8_t *)ctx->cbc_remainder;
		} else {
			blockp = datap;
		}

		/* Save this ciphertext block for the next iteration's XOR. */
		/* LINTED: pointer alignment */
		copy_block(blockp, (uint8_t *)OTHER((uint64_t *)lastp, ctx));

		if (out != NULL) {
			decrypt(ctx->cbc_keysched, blockp,
			    (uint8_t *)ctx->cbc_remainder);
			blockp = (uint8_t *)ctx->cbc_remainder;
		} else {
			decrypt(ctx->cbc_keysched, blockp, blockp);
		}

		/*
		 * XOR the previous cipher block or IV with the
		 * currently decrypted block.
		 */
		xor_block(lastp, blockp);

		/* LINTED: pointer alignment */
		lastp = (uint8_t *)OTHER((uint64_t *)lastp, ctx);

		if (out != NULL) {
			crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
			    &out_data_1_len, &out_data_2, block_size);

			bcopy(blockp, out_data_1, out_data_1_len);
			if (out_data_2 != NULL) {
				/* Plaintext straddles two output segments. */
				bcopy(blockp + out_data_1_len, out_data_2,
				    block_size - out_data_1_len);
			}

			/* update offset */
			out->cd_offset += block_size;

		} else if (ctx->cbc_remainder_len > 0) {
			/* copy temporary block to where it belongs */
			bcopy(blockp, ctx->cbc_copy_to, ctx->cbc_remainder_len);
			bcopy(blockp + ctx->cbc_remainder_len, datap, need);
		}

		/* Update pointer to next block of data to be processed. */
		if (ctx->cbc_remainder_len != 0) {
			datap += need;
			ctx->cbc_remainder_len = 0;
		} else {
			datap += block_size;
		}

		remainder = (size_t)&data[length] - (size_t)datap;

		/* Incomplete last block. */
		if (remainder > 0 && remainder < block_size) {
			bcopy(datap, ctx->cbc_remainder, remainder);
			ctx->cbc_remainder_len = remainder;
			ctx->cbc_lastp = lastp;
			ctx->cbc_copy_to = datap;
			return (CRYPTO_SUCCESS);
		}
		ctx->cbc_copy_to = NULL;

	} while (remainder > 0);

	ctx->cbc_lastp = lastp;
	return (CRYPTO_SUCCESS);
}
/*
 * Initialize a CBC context with the IV given in "param" (which must be
 * exactly block_size bytes) and mark the context as CBC mode.
 * Always returns CRYPTO_SUCCESS.
 */
int
cbc_init_ctx(cbc_ctx_t *cbc_ctx, char *param, size_t param_len,
    size_t block_size, void (*copy_block)(uint8_t *, uint64_t *))
{
	/*
	 * Copy IV into context.
	 *
	 * If cm_param == NULL then the IV comes from the
	 * cd_miscdata field in the crypto_data structure.
	 */
	if (param != NULL) {
		ASSERT(param_len == block_size);
		copy_block((uchar_t *)param, cbc_ctx->cbc_iv);
	}

	/* First XOR input is the IV itself. */
	cbc_ctx->cbc_lastp = (uint8_t *)&cbc_ctx->cbc_iv[0];
	cbc_ctx->cbc_flags |= CBC_MODE;
	return (CRYPTO_SUCCESS);
}
/* ARGSUSED */
void *
cbc_alloc_ctx(int kmflag)
{
cbc_ctx_t *cbc_ctx;
if ((cbc_ctx = kmem_zalloc(sizeof (cbc_ctx_t), kmflag)) == NULL)
return (NULL);
cbc_ctx->cbc_flags = CBC_MODE;
return (cbc_ctx);
}

920
module/icp/algs/modes/ccm.c Normal file
View File

@ -0,0 +1,920 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <sys/zfs_context.h>
#include <modes/modes.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#if defined(__i386) || defined(__amd64)
#include <sys/byteorder.h>
#define UNALIGNED_POINTERS_PERMITTED
#endif
/*
* Encrypt multiple blocks of data in CCM mode. Decrypt for CCM mode
* is done in another function.
*/
int
ccm_mode_encrypt_contiguous_blocks(ccm_ctx_t *ctx, char *data, size_t length,
    crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	size_t remainder = length;
	size_t need = 0;
	uint8_t *datap = (uint8_t *)data;
	uint8_t *blockp;
	uint8_t *lastp;
	void *iov_or_mp;
	offset_t offset;
	uint8_t *out_data_1;
	uint8_t *out_data_2;
	size_t out_data_1_len;
	uint64_t counter;
	uint8_t *mac_buf;

	/* Not enough for a full cipher block yet: buffer the input. */
	if (length + ctx->ccm_remainder_len < block_size) {
		/* accumulate bytes here and return */
		bcopy(datap,
		    (uint8_t *)ctx->ccm_remainder + ctx->ccm_remainder_len,
		    length);
		ctx->ccm_remainder_len += length;
		ctx->ccm_copy_to = datap;
		return (CRYPTO_SUCCESS);
	}

	lastp = (uint8_t *)ctx->ccm_cb;
	if (out != NULL)
		crypto_init_ptrs(out, &iov_or_mp, &offset);

	mac_buf = (uint8_t *)ctx->ccm_mac_buf;

	do {
		/* Unprocessed data from last call. */
		if (ctx->ccm_remainder_len > 0) {
			/* Top up the buffered partial block from the input. */
			need = block_size - ctx->ccm_remainder_len;

			if (need > remainder)
				return (CRYPTO_DATA_LEN_RANGE);

			bcopy(datap, &((uint8_t *)ctx->ccm_remainder)
			    [ctx->ccm_remainder_len], need);

			blockp = (uint8_t *)ctx->ccm_remainder;
		} else {
			blockp = datap;
		}

		/*
		 * do CBC MAC
		 *
		 * XOR the previous cipher block current clear block.
		 * mac_buf always contain previous cipher block.
		 */
		xor_block(blockp, mac_buf);
		encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf);

		/* ccm_cb is the counter block */
		encrypt_block(ctx->ccm_keysched, (uint8_t *)ctx->ccm_cb,
		    (uint8_t *)ctx->ccm_tmp);

		lastp = (uint8_t *)ctx->ccm_tmp;

		/*
		 * Increment counter. Counter bits are confined
		 * to the bottom 64 bits of the counter block.
		 */
#ifdef _LITTLE_ENDIAN
		counter = ntohll(ctx->ccm_cb[1] & ctx->ccm_counter_mask);
		counter = htonll(counter + 1);
#else
		counter = ctx->ccm_cb[1] & ctx->ccm_counter_mask;
		counter++;
#endif	/* _LITTLE_ENDIAN */
		counter &= ctx->ccm_counter_mask;
		ctx->ccm_cb[1] =
		    (ctx->ccm_cb[1] & ~(ctx->ccm_counter_mask)) | counter;

		/*
		 * XOR encrypted counter block with the current clear block.
		 */
		xor_block(blockp, lastp);

		ctx->ccm_processed_data_len += block_size;

		if (out == NULL) {
			/*
			 * NOTE(review): with out == NULL only the bytes
			 * borrowed into the remainder buffer are copied
			 * back (blockp, not lastp); the encrypted result in
			 * ccm_tmp is not written to the data buffer.
			 * In-place CCM output appears unsupported here —
			 * confirm callers always pass a non-NULL out.
			 */
			if (ctx->ccm_remainder_len > 0) {
				bcopy(blockp, ctx->ccm_copy_to,
				    ctx->ccm_remainder_len);
				bcopy(blockp + ctx->ccm_remainder_len, datap,
				    need);
			}
		} else {
			crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
			    &out_data_1_len, &out_data_2, block_size);

			/* copy block to where it belongs */
			if (out_data_1_len == block_size) {
				copy_block(lastp, out_data_1);
			} else {
				/* ciphertext straddles two output buffers */
				bcopy(lastp, out_data_1, out_data_1_len);
				if (out_data_2 != NULL) {
					bcopy(lastp + out_data_1_len,
					    out_data_2,
					    block_size - out_data_1_len);
				}
			}
			/* update offset */
			out->cd_offset += block_size;
		}

		/* Update pointer to next block of data to be processed. */
		if (ctx->ccm_remainder_len != 0) {
			datap += need;
			ctx->ccm_remainder_len = 0;
		} else {
			datap += block_size;
		}

		remainder = (size_t)&data[length] - (size_t)datap;

		/* Incomplete last block. */
		if (remainder > 0 && remainder < block_size) {
			bcopy(datap, ctx->ccm_remainder, remainder);
			ctx->ccm_remainder_len = remainder;
			ctx->ccm_copy_to = datap;
			goto out;
		}
		ctx->ccm_copy_to = NULL;

	} while (remainder > 0);

out:
	return (CRYPTO_SUCCESS);
}
/*
 * Produce the final CCM MAC: encrypt counter block A_0 (counter field
 * reset to index 0) and XOR the result with the accumulated CBC-MAC,
 * emitting ccm_mac_len bytes into ccm_mac.
 */
void
calculate_ccm_mac(ccm_ctx_t *ctx, uint8_t *ccm_mac,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *))
{
	uint8_t *s0 = (uint8_t *)ctx->ccm_tmp;
	uint8_t *cbc_mac = (uint8_t *)ctx->ccm_mac_buf;
	int i;

	/* Reset the counter field of the counter block to index 0. */
	ctx->ccm_cb[1] &= ~(ctx->ccm_counter_mask);

	/* S_0 = E(K, A_0) */
	encrypt_block(ctx->ccm_keysched, (uint8_t *)ctx->ccm_cb, s0);

	/* MAC = first ccm_mac_len bytes of (CBC-MAC XOR S_0) */
	for (i = 0; i < ctx->ccm_mac_len; i++)
		ccm_mac[i] = cbc_mac[i] ^ s0[i];
}
/*
 * Finish a CCM encryption: CBC-MAC and CTR-encrypt any buffered partial
 * block, compute the final MAC, and append <remaining ciphertext><MAC>
 * to `out`.  Fails unless the total payload processed equals the
 * ccm_data_len declared at init time.
 */
/* ARGSUSED */
int
ccm_encrypt_final(ccm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	uint8_t *lastp, *mac_buf, *ccm_mac_p, *macp = NULL;
	void *iov_or_mp;
	offset_t offset;
	uint8_t *out_data_1;
	uint8_t *out_data_2;
	size_t out_data_1_len;
	int i;

	/* Output must fit the leftover ciphertext plus the MAC. */
	if (out->cd_length < (ctx->ccm_remainder_len + ctx->ccm_mac_len)) {
		return (CRYPTO_DATA_LEN_RANGE);
	}

	/*
	 * When we get here, the number of bytes of payload processed
	 * plus whatever data remains, if any,
	 * should be the same as the number of bytes that's being
	 * passed in the argument during init time.
	 */
	if ((ctx->ccm_processed_data_len + ctx->ccm_remainder_len)
	    != (ctx->ccm_data_len)) {
		return (CRYPTO_DATA_LEN_RANGE);
	}

	mac_buf = (uint8_t *)ctx->ccm_mac_buf;

	if (ctx->ccm_remainder_len > 0) {

		/* ccm_mac_input_buf is not used for encryption */
		macp = (uint8_t *)ctx->ccm_mac_input_buf;
		bzero(macp, block_size);

		/* copy remainder to temporary buffer */
		bcopy(ctx->ccm_remainder, macp, ctx->ccm_remainder_len);

		/* calculate the CBC MAC */
		xor_block(macp, mac_buf);
		encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf);

		/* calculate the counter mode */
		lastp = (uint8_t *)ctx->ccm_tmp;
		encrypt_block(ctx->ccm_keysched, (uint8_t *)ctx->ccm_cb, lastp);

		/* XOR with counter block */
		for (i = 0; i < ctx->ccm_remainder_len; i++) {
			macp[i] ^= lastp[i];
		}
		ctx->ccm_processed_data_len += ctx->ccm_remainder_len;
	}

	/* Calculate the CCM MAC */
	ccm_mac_p = (uint8_t *)ctx->ccm_tmp;
	calculate_ccm_mac(ctx, ccm_mac_p, encrypt_block);

	crypto_init_ptrs(out, &iov_or_mp, &offset);
	crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
	    &out_data_1_len, &out_data_2,
	    ctx->ccm_remainder_len + ctx->ccm_mac_len);

	if (ctx->ccm_remainder_len > 0) {

		/* copy temporary block to where it belongs */
		if (out_data_2 == NULL) {
			/* everything will fit in out_data_1 */
			bcopy(macp, out_data_1, ctx->ccm_remainder_len);
			bcopy(ccm_mac_p, out_data_1 + ctx->ccm_remainder_len,
			    ctx->ccm_mac_len);
		} else {

			if (out_data_1_len < ctx->ccm_remainder_len) {

				size_t data_2_len_used;

				/* the remainder itself straddles buffers */
				bcopy(macp, out_data_1, out_data_1_len);

				data_2_len_used = ctx->ccm_remainder_len
				    - out_data_1_len;

				bcopy((uint8_t *)macp + out_data_1_len,
				    out_data_2, data_2_len_used);
				bcopy(ccm_mac_p, out_data_2 + data_2_len_used,
				    ctx->ccm_mac_len);
			} else {
				bcopy(macp, out_data_1, out_data_1_len);
				if (out_data_1_len == ctx->ccm_remainder_len) {
					/* mac will be in out_data_2 */
					bcopy(ccm_mac_p, out_data_2,
					    ctx->ccm_mac_len);
				} else {
					size_t len_not_used = out_data_1_len -
					    ctx->ccm_remainder_len;
					/*
					 * part of mac in will be in
					 * out_data_1, part of the mac will be
					 * in out_data_2
					 */
					bcopy(ccm_mac_p,
					    out_data_1 + ctx->ccm_remainder_len,
					    len_not_used);
					bcopy(ccm_mac_p + len_not_used,
					    out_data_2,
					    ctx->ccm_mac_len - len_not_used);
				}
			}
		}
	} else {
		/* copy block to where it belongs */
		bcopy(ccm_mac_p, out_data_1, out_data_1_len);
		if (out_data_2 != NULL) {
			bcopy(ccm_mac_p + out_data_1_len, out_data_2,
			    block_size - out_data_1_len);
		}
	}
	out->cd_offset += ctx->ccm_remainder_len + ctx->ccm_mac_len;
	ctx->ccm_remainder_len = 0;
	return (CRYPTO_SUCCESS);
}
/*
* This will only deal with decrypting the last block of the input that
* might not be a multiple of block length.
*/
void
ccm_decrypt_incomplete_block(ccm_ctx_t *ctx,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *))
{
	uint8_t *ct = (uint8_t *)ctx->ccm_remainder;
	uint8_t *pt = &((ctx->ccm_pt_buf)[ctx->ccm_processed_data_len]);
	uint8_t *keystream = (uint8_t *)ctx->ccm_tmp;
	int i;

	/* Generate the keystream for the current counter block. */
	encrypt_block(ctx->ccm_keysched, (uint8_t *)ctx->ccm_cb, keystream);

	/* plaintext = ciphertext XOR keystream, byte by byte */
	for (i = 0; i < ctx->ccm_remainder_len; i++)
		pt[i] = ct[i] ^ keystream[i];
}
/*
* This will decrypt the cipher text. However, the plaintext won't be
* returned to the caller. It will be returned when decrypt_final() is
* called if the MAC matches
*/
/* ARGSUSED */
int
ccm_mode_decrypt_contiguous_blocks(ccm_ctx_t *ctx, char *data, size_t length,
    crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	size_t remainder = length;
	size_t need = 0;
	uint8_t *datap = (uint8_t *)data;
	uint8_t *blockp;
	uint8_t *cbp;
	uint64_t counter;
	size_t pt_len, total_decrypted_len, mac_len, pm_len, pd_len;
	uint8_t *resultp;

	pm_len = ctx->ccm_processed_mac_len;

	if (pm_len > 0) {
		uint8_t *tmp;
		/*
		 * all ciphertext has been processed, just waiting for
		 * part of the value of the mac
		 */
		if ((pm_len + length) > ctx->ccm_mac_len) {
			return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
		}
		tmp = (uint8_t *)ctx->ccm_mac_input_buf;

		bcopy(datap, tmp + pm_len, length);

		ctx->ccm_processed_mac_len += length;
		return (CRYPTO_SUCCESS);
	}

	/*
	 * If we decrypt the given data, what total amount of data would
	 * have been decrypted?
	 */
	pd_len = ctx->ccm_processed_data_len;
	total_decrypted_len = pd_len + length + ctx->ccm_remainder_len;

	/* Input may not exceed payload + MAC declared at init time. */
	if (total_decrypted_len >
	    (ctx->ccm_data_len + ctx->ccm_mac_len)) {
		return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
	}

	pt_len = ctx->ccm_data_len;

	if (total_decrypted_len > pt_len) {
		/*
		 * part of the input will be the MAC, need to isolate that
		 * to be dealt with later. The left-over data in
		 * ccm_remainder_len from last time will not be part of the
		 * MAC. Otherwise, it would have already been taken out
		 * when this call is made last time.
		 */
		size_t pt_part = pt_len - pd_len - ctx->ccm_remainder_len;

		mac_len = length - pt_part;

		ctx->ccm_processed_mac_len = mac_len;
		bcopy(data + pt_part, ctx->ccm_mac_input_buf, mac_len);

		if (pt_part + ctx->ccm_remainder_len < block_size) {
			/*
			 * since this is last of the ciphertext, will
			 * just decrypt with it here
			 */
			bcopy(datap, &((uint8_t *)ctx->ccm_remainder)
			    [ctx->ccm_remainder_len], pt_part);
			ctx->ccm_remainder_len += pt_part;
			ccm_decrypt_incomplete_block(ctx, encrypt_block);
			ctx->ccm_processed_data_len += ctx->ccm_remainder_len;
			ctx->ccm_remainder_len = 0;
			return (CRYPTO_SUCCESS);
		} else {
			/* let rest of the code handle this */
			length = pt_part;
		}
	} else if (length + ctx->ccm_remainder_len < block_size) {
		/* accumulate bytes here and return */
		bcopy(datap,
		    (uint8_t *)ctx->ccm_remainder + ctx->ccm_remainder_len,
		    length);
		ctx->ccm_remainder_len += length;
		ctx->ccm_copy_to = datap;
		return (CRYPTO_SUCCESS);
	}

	do {
		/* Unprocessed data from last call. */
		if (ctx->ccm_remainder_len > 0) {
			need = block_size - ctx->ccm_remainder_len;

			if (need > remainder)
				return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);

			bcopy(datap, &((uint8_t *)ctx->ccm_remainder)
			    [ctx->ccm_remainder_len], need);

			blockp = (uint8_t *)ctx->ccm_remainder;
		} else {
			blockp = datap;
		}

		/* Calculate the counter mode, ccm_cb is the counter block */
		cbp = (uint8_t *)ctx->ccm_tmp;
		encrypt_block(ctx->ccm_keysched, (uint8_t *)ctx->ccm_cb, cbp);

		/*
		 * Increment counter.
		 * Counter bits are confined to the bottom 64 bits
		 */
#ifdef _LITTLE_ENDIAN
		counter = ntohll(ctx->ccm_cb[1] & ctx->ccm_counter_mask);
		counter = htonll(counter + 1);
#else
		counter = ctx->ccm_cb[1] & ctx->ccm_counter_mask;
		counter++;
#endif	/* _LITTLE_ENDIAN */
		counter &= ctx->ccm_counter_mask;
		ctx->ccm_cb[1] =
		    (ctx->ccm_cb[1] & ~(ctx->ccm_counter_mask)) | counter;

		/* XOR with the ciphertext */
		xor_block(blockp, cbp);

		/* Copy the plaintext to the "holding buffer" */
		resultp = (uint8_t *)ctx->ccm_pt_buf +
		    ctx->ccm_processed_data_len;
		copy_block(cbp, resultp);

		ctx->ccm_processed_data_len += block_size;

		ctx->ccm_lastp = blockp;

		/* Update pointer to next block of data to be processed. */
		if (ctx->ccm_remainder_len != 0) {
			datap += need;
			ctx->ccm_remainder_len = 0;
		} else {
			datap += block_size;
		}

		remainder = (size_t)&data[length] - (size_t)datap;

		/* Incomplete last block */
		if (remainder > 0 && remainder < block_size) {
			bcopy(datap, ctx->ccm_remainder, remainder);
			ctx->ccm_remainder_len = remainder;
			ctx->ccm_copy_to = datap;
			if (ctx->ccm_processed_mac_len > 0) {
				/*
				 * not expecting anymore ciphertext, just
				 * compute plaintext for the remaining input
				 */
				ccm_decrypt_incomplete_block(ctx,
				    encrypt_block);
				ctx->ccm_processed_data_len += remainder;
				ctx->ccm_remainder_len = 0;
			}
			goto out;
		}
		ctx->ccm_copy_to = NULL;

	} while (remainder > 0);

out:
	return (CRYPTO_SUCCESS);
}
/*
 * Finish a CCM decryption: recompute the CBC-MAC over the buffered
 * plaintext, compare it with the MAC received with the ciphertext, and
 * release the plaintext to `out` only if the two match.
 */
int
ccm_decrypt_final(ccm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	size_t mac_remain, pt_len;
	uint8_t *pt, *mac_buf, *macp, *ccm_mac_p;
	int rv;

	pt_len = ctx->ccm_data_len;

	/* Make sure output buffer can fit all of the plaintext */
	if (out->cd_length < pt_len) {
		return (CRYPTO_DATA_LEN_RANGE);
	}

	pt = ctx->ccm_pt_buf;
	mac_remain = ctx->ccm_processed_data_len;
	mac_buf = (uint8_t *)ctx->ccm_mac_buf;

	macp = (uint8_t *)ctx->ccm_tmp;

	/* Re-run the CBC-MAC over the recovered plaintext. */
	while (mac_remain > 0) {

		if (mac_remain < block_size) {
			/* zero-pad the final partial block */
			bzero(macp, block_size);
			bcopy(pt, macp, mac_remain);
			mac_remain = 0;
		} else {
			copy_block(pt, macp);
			mac_remain -= block_size;
			pt += block_size;
		}

		/* calculate the CBC MAC */
		xor_block(macp, mac_buf);
		encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf);
	}

	/* Calculate the CCM MAC */
	ccm_mac_p = (uint8_t *)ctx->ccm_tmp;
	calculate_ccm_mac((ccm_ctx_t *)ctx, ccm_mac_p, encrypt_block);

	/*
	 * compare the input CCM MAC value with what we calculated
	 *
	 * NOTE(review): bcmp() is not constant-time, so MAC comparison
	 * leaks timing; confirm this is acceptable in this context.
	 */
	if (bcmp(ctx->ccm_mac_input_buf, ccm_mac_p, ctx->ccm_mac_len)) {
		/* They don't match */
		return (CRYPTO_INVALID_MAC);
	} else {
		rv = crypto_put_output_data(ctx->ccm_pt_buf, out, pt_len);
		if (rv != CRYPTO_SUCCESS)
			return (rv);
		out->cd_offset += pt_len;
	}
	return (CRYPTO_SUCCESS);
}
/*
 * Sanity-check CK_AES_CCM_PARAMS per NIST SP 800-38C / RFC 3610:
 * MAC length must be an even value from 4 through 16, nonce length must
 * lie in [7, 13], and the payload length must be representable in the
 * q = 15 - nonceSize octets reserved for it.  For decryption the input
 * must be large enough to contain at least the MAC.
 */
int
ccm_validate_args(CK_AES_CCM_PARAMS *ccm_param, boolean_t is_encrypt_init)
{
	size_t mac_size = ccm_param->ulMACSize;
	size_t nonce_size = ccm_param->ulNonceSize;
	uint8_t q;
	uint64_t max_payload;

	/* Valid MAC lengths: 4, 6, 8, 10, 12, 14, 16. */
	if (mac_size < 4 || mac_size > 16 || (mac_size % 2) != 0)
		return (CRYPTO_MECHANISM_PARAM_INVALID);

	/* Valid nonce lengths: 7 through 13. */
	if (nonce_size < 7 || nonce_size > 13)
		return (CRYPTO_MECHANISM_PARAM_INVALID);

	/* q is the length of the field storing the length, in bytes. */
	q = (uint8_t)((15 - nonce_size) & 0xFF);

	/*
	 * If it is decrypt, need to make sure size of ciphertext is at
	 * least bigger than MAC len.
	 */
	if (!is_encrypt_init && ccm_param->ulDataSize < mac_size)
		return (CRYPTO_MECHANISM_PARAM_INVALID);

	/* The payload length must fit in q octets. */
	max_payload = (q < 8) ? ((1ULL << (q * 8)) - 1) : ULONG_MAX;
	if (ccm_param->ulDataSize > max_payload)
		return (CRYPTO_MECHANISM_PARAM_INVALID);

	return (CRYPTO_SUCCESS);
}
/*
* Format the first block used in CBC-MAC (B0) and the initial counter
* block based on formatting functions and counter generation functions
* specified in RFC 3610 and NIST publication 800-38C, appendix A
*
* b0 is the first block used in CBC-MAC
* cb0 is the first counter block
*
* It's assumed that the arguments b0 and cb0 are preallocated AES blocks
*
*/
static void
ccm_format_initial_blocks(uchar_t *nonce, ulong_t nonceSize,
    ulong_t authDataSize, uint8_t *b0, ccm_ctx_t *aes_ctx)
{
	uint64_t payloadSize;
	uint8_t t, q, have_adata = 0;
	size_t limit;
	int i, j, k;
	uint64_t mask = 0;
	uint8_t *cb;

	/* q: octets for the payload-length field; t: MAC length */
	q = (uint8_t)((15 - nonceSize) & 0xFF);
	t = (uint8_t)((aes_ctx->ccm_mac_len) & 0xFF);

	/* Construct the first octet of b0 */
	if (authDataSize > 0) {
		have_adata = 1;
	}
	/* flags octet: Adata bit | (t-2)/2 | (q-1), per 800-38C A.2.1 */
	b0[0] = (have_adata << 6) | (((t - 2) / 2) << 3) | (q - 1);

	/* copy the nonce value into b0 */
	bcopy(nonce, &(b0[1]), nonceSize);

	/* store the length of the payload into b0 */
	bzero(&(b0[1+nonceSize]), q);

	payloadSize = aes_ctx->ccm_data_len;
	limit = 8 < q ? 8 : q;

	/* big-endian encode payloadSize into the trailing octets of b0 */
	for (i = 0, j = 0, k = 15; i < limit; i++, j += 8, k--) {
		b0[k] = (uint8_t)((payloadSize >> j) & 0xFF);
	}

	/* format the counter block */

	cb = (uint8_t *)aes_ctx->ccm_cb;

	/* counter-block flags octet holds only q-1 (no Adata/MAC bits) */
	cb[0] = 0x07 & (q-1); /* first byte */

	/* copy the nonce value into the counter block */
	bcopy(nonce, &(cb[1]), nonceSize);

	bzero(&(cb[1+nonceSize]), q);

	/* Create the mask for the counter field based on the size of nonce */
	q <<= 3;
	while (q-- > 0) {
		mask |= (1ULL << q);
	}

	/* mask is stored byte-swapped to match the big-endian block */
#ifdef _LITTLE_ENDIAN
	mask = htonll(mask);
#endif
	aes_ctx->ccm_counter_mask = mask;

	/*
	 * During calculation, we start using counter block 1, we will
	 * set it up right here.
	 * We can just set the last byte to have the value 1, because
	 * even with the biggest nonce of 13, the last byte of the
	 * counter block will be used for the counter value.
	 */
	cb[15] = 0x01;
}
/*
* Encode the length of the associated data as
* specified in RFC 3610 and NIST publication 800-38C, appendix A
*/
/*
 * Encode the length of the associated data as specified in RFC 3610 and
 * NIST publication 800-38C, appendix A.  The encoding (2, 6 or 10 octets)
 * is written to `encoded` and its size to *encoded_len.
 */
static void
encode_adata_len(ulong_t auth_data_len, uint8_t *encoded, size_t *encoded_len)
{
#ifdef UNALIGNED_POINTERS_PERMITTED
	uint32_t *lencoded_ptr;
#ifdef _LP64
	uint64_t *llencoded_ptr;
#endif
#endif	/* UNALIGNED_POINTERS_PERMITTED */

	if (auth_data_len < ((1ULL<<16) - (1ULL<<8))) {
		/* 0 < a < (2^16-2^8): two-octet big-endian length */
		*encoded_len = 2;
		encoded[0] = (auth_data_len & 0xff00) >> 8;
		encoded[1] = auth_data_len & 0xff;

	} else if ((auth_data_len >= ((1ULL<<16) - (1ULL<<8))) &&
	    (auth_data_len < (1ULL << 32))) {
		/*
		 * (2^16-2^8) <= a < 2^32: 0xff 0xfe marker followed by a
		 * four-octet big-endian length.  The upper bound is 2^32
		 * per the spec; the previous bound of 2^31 wrongly pushed
		 * lengths in [2^31, 2^32) into the ten-octet form (and on
		 * 32-bit kernels left them unencoded entirely).
		 */
		*encoded_len = 6;
		encoded[0] = 0xff;
		encoded[1] = 0xfe;
#ifdef UNALIGNED_POINTERS_PERMITTED
		lencoded_ptr = (uint32_t *)&encoded[2];
		*lencoded_ptr = htonl(auth_data_len);
#else
		encoded[2] = (auth_data_len & 0xff000000) >> 24;
		encoded[3] = (auth_data_len & 0xff0000) >> 16;
		encoded[4] = (auth_data_len & 0xff00) >> 8;
		encoded[5] = auth_data_len & 0xff;
#endif	/* UNALIGNED_POINTERS_PERMITTED */

#ifdef _LP64
	} else {
		/*
		 * 2^32 <= a < 2^64: 0xff 0xff marker followed by an
		 * eight-octet big-endian length.  htonll() is required
		 * here; htonl() would truncate the value to 32 bits.
		 */
		*encoded_len = 10;
		encoded[0] = 0xff;
		encoded[1] = 0xff;
#ifdef UNALIGNED_POINTERS_PERMITTED
		llencoded_ptr = (uint64_t *)&encoded[2];
		*llencoded_ptr = htonll(auth_data_len);
#else
		encoded[2] = (auth_data_len & 0xff00000000000000) >> 56;
		encoded[3] = (auth_data_len & 0xff000000000000) >> 48;
		encoded[4] = (auth_data_len & 0xff0000000000) >> 40;
		encoded[5] = (auth_data_len & 0xff00000000) >> 32;
		encoded[6] = (auth_data_len & 0xff000000) >> 24;
		encoded[7] = (auth_data_len & 0xff0000) >> 16;
		encoded[8] = (auth_data_len & 0xff00) >> 8;
		encoded[9] = auth_data_len & 0xff;
#endif	/* UNALIGNED_POINTERS_PERMITTED */
#endif	/* _LP64 */
	}
}
/*
* The following function should be call at encrypt or decrypt init time
* for AES CCM mode.
*/
int
ccm_init(ccm_ctx_t *ctx, unsigned char *nonce, size_t nonce_len,
    unsigned char *auth_data, size_t auth_data_len, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	uint8_t *mac_buf, *datap, *ivp, *authp;
	size_t remainder, processed;
	uint8_t encoded_a[10]; /* max encoded auth data length is 10 octets */
	size_t encoded_a_len = 0;

	mac_buf = (uint8_t *)&(ctx->ccm_mac_buf);

	/*
	 * Format the 1st block for CBC-MAC and construct the
	 * 1st counter block.
	 *
	 * aes_ctx->ccm_iv is used for storing the counter block
	 * mac_buf will store b0 at this time.
	 */
	ccm_format_initial_blocks(nonce, nonce_len,
	    auth_data_len, mac_buf, ctx);

	/* The IV for CBC MAC for AES CCM mode is always zero */
	ivp = (uint8_t *)ctx->ccm_tmp;
	bzero(ivp, block_size);

	xor_block(ivp, mac_buf);

	/* encrypt the nonce */
	encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf);

	/* take care of the associated data, if any */
	if (auth_data_len == 0) {
		return (CRYPTO_SUCCESS);
	}

	encode_adata_len(auth_data_len, encoded_a, &encoded_a_len);

	remainder = auth_data_len;

	/* 1st block: it contains encoded associated data, and some data */
	authp = (uint8_t *)ctx->ccm_tmp;
	bzero(authp, block_size);
	bcopy(encoded_a, authp, encoded_a_len);
	processed = block_size - encoded_a_len;
	if (processed > auth_data_len) {
		/* in case auth_data is very small */
		processed = auth_data_len;
	}
	bcopy(auth_data, authp+encoded_a_len, processed);
	/* xor with previous buffer */
	xor_block(authp, mac_buf);
	encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf);
	remainder -= processed;
	if (remainder == 0) {
		/* a small amount of associated data, it's all done now */
		return (CRYPTO_SUCCESS);
	}

	/* fold the rest of the associated data into the CBC-MAC */
	do {
		if (remainder < block_size) {
			/*
			 * There's not a block full of data, pad rest of
			 * buffer with zero
			 */
			bzero(authp, block_size);
			bcopy(&(auth_data[processed]), authp, remainder);
			datap = (uint8_t *)authp;
			remainder = 0;
		} else {
			datap = (uint8_t *)(&(auth_data[processed]));
			processed += block_size;
			remainder -= block_size;
		}

		xor_block(datap, mac_buf);
		encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf);

	} while (remainder > 0);

	return (CRYPTO_SUCCESS);
}
/*
 * Initialize a CCM context from CK_AES_CCM_PARAMS.  For decryption the
 * declared data size includes the trailing MAC, so the payload length is
 * ulDataSize - ulMACSize and a plaintext holding buffer is allocated
 * (plaintext is only released to the caller after MAC verification).
 */
int
ccm_init_ctx(ccm_ctx_t *ccm_ctx, char *param, int kmflag,
    boolean_t is_encrypt_init, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	int rv;
	CK_AES_CCM_PARAMS *ccm_param;

	if (param != NULL) {
		ccm_param = (CK_AES_CCM_PARAMS *)param;

		/* Reject invalid MAC/nonce/length combinations up front. */
		if ((rv = ccm_validate_args(ccm_param,
		    is_encrypt_init)) != 0) {
			return (rv);
		}

		ccm_ctx->ccm_mac_len = ccm_param->ulMACSize;
		if (is_encrypt_init) {
			ccm_ctx->ccm_data_len = ccm_param->ulDataSize;
		} else {
			/* the ciphertext length includes the MAC */
			ccm_ctx->ccm_data_len =
			    ccm_param->ulDataSize - ccm_ctx->ccm_mac_len;
			ccm_ctx->ccm_processed_mac_len = 0;
		}
		ccm_ctx->ccm_processed_data_len = 0;

		ccm_ctx->ccm_flags |= CCM_MODE;
	} else {
		rv = CRYPTO_MECHANISM_PARAM_INVALID;
		goto out;
	}

	if (ccm_init(ccm_ctx, ccm_param->nonce, ccm_param->ulNonceSize,
	    ccm_param->authData, ccm_param->ulAuthDataSize, block_size,
	    encrypt_block, xor_block) != 0) {
		rv = CRYPTO_MECHANISM_PARAM_INVALID;
		goto out;
	}
	if (!is_encrypt_init) {
		/* allocate buffer for storing decrypted plaintext */
		/*
		 * NOTE(review): ccm_data_len may be 0 when ulDataSize ==
		 * ulMACSize; confirm vmem_alloc(0) is acceptable here.
		 */
		ccm_ctx->ccm_pt_buf = vmem_alloc(ccm_ctx->ccm_data_len,
		    kmflag);
		if (ccm_ctx->ccm_pt_buf == NULL) {
			rv = CRYPTO_HOST_MEMORY;
		}
	}
out:
	return (rv);
}
void *
ccm_alloc_ctx(int kmflag)
{
ccm_ctx_t *ccm_ctx;
if ((ccm_ctx = kmem_zalloc(sizeof (ccm_ctx_t), kmflag)) == NULL)
return (NULL);
ccm_ctx->ccm_flags = CCM_MODE;
return (ccm_ctx);
}

238
module/icp/algs/modes/ctr.c Normal file
View File

@ -0,0 +1,238 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <sys/zfs_context.h>
#include <modes/modes.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#include <sys/byteorder.h>
/*
* Encrypt and decrypt multiple blocks of data in counter mode.
*/
int
ctr_mode_contiguous_blocks(ctr_ctx_t *ctx, char *data, size_t length,
    crypto_data_t *out, size_t block_size,
    int (*cipher)(const void *ks, const uint8_t *pt, uint8_t *ct),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	size_t remainder = length;
	size_t need = 0;
	uint8_t *datap = (uint8_t *)data;
	uint8_t *blockp;
	uint8_t *lastp;
	void *iov_or_mp;
	offset_t offset;
	uint8_t *out_data_1;
	uint8_t *out_data_2;
	size_t out_data_1_len;
	uint64_t lower_counter, upper_counter;

	/* Not enough for a full cipher block yet: buffer the input. */
	if (length + ctx->ctr_remainder_len < block_size) {
		/* accumulate bytes here and return */
		bcopy(datap,
		    (uint8_t *)ctx->ctr_remainder + ctx->ctr_remainder_len,
		    length);
		ctx->ctr_remainder_len += length;
		ctx->ctr_copy_to = datap;
		return (CRYPTO_SUCCESS);
	}

	lastp = (uint8_t *)ctx->ctr_cb;
	if (out != NULL)
		crypto_init_ptrs(out, &iov_or_mp, &offset);

	do {
		/* Unprocessed data from last call. */
		if (ctx->ctr_remainder_len > 0) {
			/* Top up the buffered partial block from the input. */
			need = block_size - ctx->ctr_remainder_len;

			if (need > remainder)
				return (CRYPTO_DATA_LEN_RANGE);

			bcopy(datap, &((uint8_t *)ctx->ctr_remainder)
			    [ctx->ctr_remainder_len], need);

			blockp = (uint8_t *)ctx->ctr_remainder;
		} else {
			blockp = datap;
		}

		/* ctr_cb is the counter block */
		cipher(ctx->ctr_keysched, (uint8_t *)ctx->ctr_cb,
		    (uint8_t *)ctx->ctr_tmp);

		lastp = (uint8_t *)ctx->ctr_tmp;

		/*
		 * Increment Counter.
		 */
		/* masks select only the counter bits of each 64-bit half */
		lower_counter = ntohll(ctx->ctr_cb[1] & ctx->ctr_lower_mask);
		lower_counter = htonll(lower_counter + 1);
		lower_counter &= ctx->ctr_lower_mask;
		ctx->ctr_cb[1] = (ctx->ctr_cb[1] & ~(ctx->ctr_lower_mask)) |
		    lower_counter;

		/* wrap around */
		if (lower_counter == 0) {
			/* carry into the upper 64-bit half */
			upper_counter =
			    ntohll(ctx->ctr_cb[0] & ctx->ctr_upper_mask);
			upper_counter = htonll(upper_counter + 1);
			upper_counter &= ctx->ctr_upper_mask;
			ctx->ctr_cb[0] =
			    (ctx->ctr_cb[0] & ~(ctx->ctr_upper_mask)) |
			    upper_counter;
		}

		/*
		 * XOR encrypted counter block with the current clear block.
		 */
		xor_block(blockp, lastp);

		if (out == NULL) {
			/* in-place: ciphertext (in lastp) overwrites input */
			if (ctx->ctr_remainder_len > 0) {
				bcopy(lastp, ctx->ctr_copy_to,
				    ctx->ctr_remainder_len);
				bcopy(lastp + ctx->ctr_remainder_len, datap,
				    need);
			}
		} else {
			crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
			    &out_data_1_len, &out_data_2, block_size);

			/* copy block to where it belongs */
			bcopy(lastp, out_data_1, out_data_1_len);
			if (out_data_2 != NULL) {
				bcopy(lastp + out_data_1_len, out_data_2,
				    block_size - out_data_1_len);
			}
			/* update offset */
			out->cd_offset += block_size;
		}

		/* Update pointer to next block of data to be processed. */
		if (ctx->ctr_remainder_len != 0) {
			datap += need;
			ctx->ctr_remainder_len = 0;
		} else {
			datap += block_size;
		}

		remainder = (size_t)&data[length] - (size_t)datap;

		/* Incomplete last block. */
		if (remainder > 0 && remainder < block_size) {
			bcopy(datap, ctx->ctr_remainder, remainder);
			ctx->ctr_remainder_len = remainder;
			ctx->ctr_copy_to = datap;
			goto out;
		}
		ctx->ctr_copy_to = NULL;

	} while (remainder > 0);

out:
	return (CRYPTO_SUCCESS);
}
/*
 * Flush a CTR stream: encrypt the current counter block and XOR the
 * buffered partial block with the resulting keystream, writing
 * ctr_remainder_len bytes to `out`.
 */
int
ctr_mode_final(ctr_ctx_t *ctx, crypto_data_t *out,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *))
{
	uint8_t *lastp;
	void *iov_or_mp;
	offset_t offset;
	uint8_t *out_data_1;
	uint8_t *out_data_2;
	size_t out_data_1_len;
	uint8_t *p;
	int i;

	if (out->cd_length < ctx->ctr_remainder_len)
		return (CRYPTO_DATA_LEN_RANGE);

	/* keystream for the final (partial) block */
	encrypt_block(ctx->ctr_keysched, (uint8_t *)ctx->ctr_cb,
	    (uint8_t *)ctx->ctr_tmp);

	lastp = (uint8_t *)ctx->ctr_tmp;
	p = (uint8_t *)ctx->ctr_remainder;
	/* XOR the remainder bytes with the keystream, in place */
	for (i = 0; i < ctx->ctr_remainder_len; i++) {
		p[i] ^= lastp[i];
	}

	crypto_init_ptrs(out, &iov_or_mp, &offset);
	crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
	    &out_data_1_len, &out_data_2, ctx->ctr_remainder_len);

	bcopy(p, out_data_1, out_data_1_len);
	if (out_data_2 != NULL) {
		bcopy((uint8_t *)p + out_data_1_len,
		    out_data_2, ctx->ctr_remainder_len - out_data_1_len);
	}
	out->cd_offset += ctx->ctr_remainder_len;
	ctx->ctr_remainder_len = 0;
	return (CRYPTO_SUCCESS);
}
/*
 * Initialize a CTR context.  `count` is the number of counter bits
 * (1-128) at the low end of the counter block `cb`; the remaining bits
 * are nonce and must not be disturbed when the counter increments.
 * Builds the masks selecting the counter bits of each 64-bit half.
 */
int
ctr_init_ctx(ctr_ctx_t *ctr_ctx, ulong_t count, uint8_t *cb,
    void (*copy_block)(uint8_t *, uint8_t *))
{
	uint64_t lo_mask, hi_mask;

	if (count == 0 || count > 128)
		return (CRYPTO_MECHANISM_PARAM_INVALID);

	if (count < 64) {
		/* counter confined to the lower 64-bit word */
		lo_mask = (1ULL << count) - 1;
		hi_mask = 0;
	} else {
		/* lower word all counter; the rest spills into the upper */
		lo_mask = UINT64_MAX;
		count -= 64;
		hi_mask = (count == 64) ? UINT64_MAX : (1ULL << count) - 1;
	}

	/* masks are stored byte-swapped to match the big-endian block */
	ctr_ctx->ctr_lower_mask = htonll(lo_mask);
	ctr_ctx->ctr_upper_mask = htonll(hi_mask);

	copy_block(cb, (uchar_t *)ctr_ctx->ctr_cb);
	ctr_ctx->ctr_lastp = (uint8_t *)&ctr_ctx->ctr_cb[0];
	ctr_ctx->ctr_flags |= CTR_MODE;

	return (CRYPTO_SUCCESS);
}
/* ARGSUSED */
void *
ctr_alloc_ctx(int kmflag)
{
ctr_ctx_t *ctr_ctx;
if ((ctr_ctx = kmem_zalloc(sizeof (ctr_ctx_t), kmflag)) == NULL)
return (NULL);
ctr_ctx->ctr_flags = CTR_MODE;
return (ctr_ctx);
}

143
module/icp/algs/modes/ecb.c Normal file
View File

@ -0,0 +1,143 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <sys/zfs_context.h>
#include <modes/modes.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
/*
* Algorithm independent ECB functions.
*/
int
ecb_cipher_contiguous_blocks(ecb_ctx_t *ctx, char *data, size_t length,
    crypto_data_t *out, size_t block_size,
    int (*cipher)(const void *ks, const uint8_t *pt, uint8_t *ct))
{
	size_t remainder = length;
	size_t need = 0;
	uint8_t *datap = (uint8_t *)data;
	uint8_t *blockp;
	uint8_t *lastp;
	void *iov_or_mp;
	offset_t offset;
	uint8_t *out_data_1;
	uint8_t *out_data_2;
	size_t out_data_1_len;

	/* Not enough for a full cipher block yet: buffer the input. */
	if (length + ctx->ecb_remainder_len < block_size) {
		/* accumulate bytes here and return */
		bcopy(datap,
		    (uint8_t *)ctx->ecb_remainder + ctx->ecb_remainder_len,
		    length);
		ctx->ecb_remainder_len += length;
		ctx->ecb_copy_to = datap;
		return (CRYPTO_SUCCESS);
	}

	/* ecb_iv doubles as scratch space for staging output blocks */
	lastp = (uint8_t *)ctx->ecb_iv;
	if (out != NULL)
		crypto_init_ptrs(out, &iov_or_mp, &offset);

	do {
		/* Unprocessed data from last call. */
		if (ctx->ecb_remainder_len > 0) {
			/* Top up the buffered partial block from the input. */
			need = block_size - ctx->ecb_remainder_len;

			if (need > remainder)
				return (CRYPTO_DATA_LEN_RANGE);

			bcopy(datap, &((uint8_t *)ctx->ecb_remainder)
			    [ctx->ecb_remainder_len], need);

			blockp = (uint8_t *)ctx->ecb_remainder;
		} else {
			blockp = datap;
		}

		if (out == NULL) {
			/* in-place: cipher the block over itself */
			cipher(ctx->ecb_keysched, blockp, blockp);

			ctx->ecb_lastp = blockp;
			lastp = blockp;

			if (ctx->ecb_remainder_len > 0) {
				bcopy(blockp, ctx->ecb_copy_to,
				    ctx->ecb_remainder_len);
				bcopy(blockp + ctx->ecb_remainder_len, datap,
				    need);
			}
		} else {
			cipher(ctx->ecb_keysched, blockp, lastp);
			crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
			    &out_data_1_len, &out_data_2, block_size);

			/* copy block to where it belongs */
			bcopy(lastp, out_data_1, out_data_1_len);
			if (out_data_2 != NULL) {
				bcopy(lastp + out_data_1_len, out_data_2,
				    block_size - out_data_1_len);
			}
			/* update offset */
			out->cd_offset += block_size;
		}

		/* Update pointer to next block of data to be processed. */
		if (ctx->ecb_remainder_len != 0) {
			datap += need;
			ctx->ecb_remainder_len = 0;
		} else {
			datap += block_size;
		}

		remainder = (size_t)&data[length] - (size_t)datap;

		/* Incomplete last block. */
		if (remainder > 0 && remainder < block_size) {
			bcopy(datap, ctx->ecb_remainder, remainder);
			ctx->ecb_remainder_len = remainder;
			ctx->ecb_copy_to = datap;
			goto out;
		}
		ctx->ecb_copy_to = NULL;

	} while (remainder > 0);

out:
	return (CRYPTO_SUCCESS);
}
/* ARGSUSED */
void *
ecb_alloc_ctx(int kmflag)
{
ecb_ctx_t *ecb_ctx;
if ((ecb_ctx = kmem_zalloc(sizeof (ecb_ctx_t), kmflag)) == NULL)
return (NULL);
ecb_ctx->ecb_flags = ECB_MODE;
return (ecb_ctx);
}

748
module/icp/algs/modes/gcm.c Normal file
View File

@ -0,0 +1,748 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <modes/modes.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#include <sys/byteorder.h>
#ifdef __amd64
#ifdef _KERNEL
/* Workaround for no XMM kernel thread save/restore */
#define KPREEMPT_DISABLE kpreempt_disable()
#define KPREEMPT_ENABLE kpreempt_enable()
#else
#define KPREEMPT_DISABLE
#define KPREEMPT_ENABLE
#endif /* _KERNEL */
extern void gcm_mul_pclmulqdq(uint64_t *x_in, uint64_t *y, uint64_t *res);
static int intel_pclmulqdq_instruction_present(void);
#endif /* __amd64 */
/* A 128-bit value handled as two 64-bit halves (a = high, b = low). */
struct aes_block {
	uint64_t a;
	uint64_t b;
};
/*
 * gcm_mul()
 * Perform a carry-less multiplication (that is, use XOR instead of the
 * multiply operator) on *x_in and *y and place the result in *res.
 *
 * Byte swap the input (*x_in and *y) and the output (*res).
 *
 * Note: x_in, y, and res all point to 16-byte numbers (an array of two
 * 64-bit integers).
 */
void
gcm_mul(uint64_t *x_in, uint64_t *y, uint64_t *res)
{
#ifdef __amd64
	if (intel_pclmulqdq_instruction_present()) {
		/*
		 * Hardware carry-less multiply.  XMM state is not saved
		 * across kernel preemption (see KPREEMPT_* above), so
		 * preemption must be disabled around the call.
		 */
		KPREEMPT_DISABLE;
		gcm_mul_pclmulqdq(x_in, y, res);
		KPREEMPT_ENABLE;
	} else
#endif	/* __amd64 */
	{
		/* GCM reduction constant for GF(2^128) (NIST SP 800-38D) */
		static const uint64_t R = 0xe100000000000000ULL;
		struct aes_block z = {0, 0};
		struct aes_block v;
		uint64_t x;
		int i, j;

		/* load y as a 128-bit big-endian value */
		v.a = ntohll(y[0]);
		v.b = ntohll(y[1]);

		/* classic shift-and-xor GF(2^128) multiply, MSB first */
		for (j = 0; j < 2; j++) {
			x = ntohll(x_in[j]);
			for (i = 0; i < 64; i++, x <<= 1) {
				/* if this bit of x is set, accumulate v */
				if (x & 0x8000000000000000ULL) {
					z.a ^= v.a;
					z.b ^= v.b;
				}
				/* v >>= 1; reduce by R if a bit fell off */
				if (v.b & 1ULL) {
					v.b = (v.a << 63)|(v.b >> 1);
					v.a = (v.a >> 1) ^ R;
				} else {
					v.b = (v.a << 63)|(v.b >> 1);
					v.a = v.a >> 1;
				}
			}
		}

		/* store the product back in big-endian byte order */
		res[0] = htonll(z.a);
		res[1] = htonll(z.b);
	}
}
#define GHASH(c, d, t) \
xor_block((uint8_t *)(d), (uint8_t *)(c)->gcm_ghash); \
gcm_mul((uint64_t *)(void *)(c)->gcm_ghash, (c)->gcm_H, \
(uint64_t *)(void *)(t));
/*
* Encrypt multiple blocks of data in GCM mode. Decrypt for GCM mode
* is done in another function.
*/
int
gcm_mode_encrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
    crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	size_t remainder = length;
	size_t need = 0;
	uint8_t *datap = (uint8_t *)data;
	uint8_t *blockp;
	uint8_t *lastp;
	void *iov_or_mp;
	offset_t offset;
	uint8_t *out_data_1;
	uint8_t *out_data_2;
	size_t out_data_1_len;
	uint64_t counter;
	uint64_t counter_mask = ntohll(0x00000000ffffffffULL);

	/*
	 * If the buffered leftover plus this input is still shorter than
	 * one block, stash it in the context and wait for more data.
	 */
	if (length + ctx->gcm_remainder_len < block_size) {
		/* accumulate bytes here and return */
		bcopy(datap,
		    (uint8_t *)ctx->gcm_remainder + ctx->gcm_remainder_len,
		    length);
		ctx->gcm_remainder_len += length;
		ctx->gcm_copy_to = datap;
		return (CRYPTO_SUCCESS);
	}

	lastp = (uint8_t *)ctx->gcm_cb;
	if (out != NULL)
		crypto_init_ptrs(out, &iov_or_mp, &offset);

	do {
		/* Unprocessed data from last call. */
		if (ctx->gcm_remainder_len > 0) {
			/* top up the buffered partial block to a full one */
			need = block_size - ctx->gcm_remainder_len;

			if (need > remainder)
				return (CRYPTO_DATA_LEN_RANGE);

			bcopy(datap, &((uint8_t *)ctx->gcm_remainder)
			    [ctx->gcm_remainder_len], need);

			blockp = (uint8_t *)ctx->gcm_remainder;
		} else {
			blockp = datap;
		}

		/*
		 * Increment counter. Counter bits are confined
		 * to the bottom 32 bits of the counter block.
		 */
		counter = ntohll(ctx->gcm_cb[1] & counter_mask);
		counter = htonll(counter + 1);
		counter &= counter_mask;
		ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;

		/* CTR step: gcm_tmp = E(K, counter) XOR plaintext block */
		encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb,
		    (uint8_t *)ctx->gcm_tmp);
		xor_block(blockp, (uint8_t *)ctx->gcm_tmp);

		lastp = (uint8_t *)ctx->gcm_tmp;

		ctx->gcm_processed_data_len += block_size;

		if (out == NULL) {
			/*
			 * No separate output buffer: restore the bytes that
			 * were borrowed from the caller's input to complete
			 * the buffered partial block.
			 */
			if (ctx->gcm_remainder_len > 0) {
				bcopy(blockp, ctx->gcm_copy_to,
				    ctx->gcm_remainder_len);
				bcopy(blockp + ctx->gcm_remainder_len, datap,
				    need);
			}
		} else {
			crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
			    &out_data_1_len, &out_data_2, block_size);

			/* copy block to where it belongs */
			if (out_data_1_len == block_size) {
				copy_block(lastp, out_data_1);
			} else {
				/* ciphertext straddles two output iovecs */
				bcopy(lastp, out_data_1, out_data_1_len);
				if (out_data_2 != NULL) {
					bcopy(lastp + out_data_1_len,
					    out_data_2,
					    block_size - out_data_1_len);
				}
			}
			/* update offset */
			out->cd_offset += block_size;
		}

		/* add ciphertext to the hash */
		GHASH(ctx, ctx->gcm_tmp, ctx->gcm_ghash);

		/* Update pointer to next block of data to be processed. */
		if (ctx->gcm_remainder_len != 0) {
			datap += need;
			ctx->gcm_remainder_len = 0;
		} else {
			datap += block_size;
		}

		remainder = (size_t)&data[length] - (size_t)datap;

		/* Incomplete last block. */
		if (remainder > 0 && remainder < block_size) {
			bcopy(datap, ctx->gcm_remainder, remainder);
			ctx->gcm_remainder_len = remainder;
			ctx->gcm_copy_to = datap;
			goto out;
		}
		ctx->gcm_copy_to = NULL;

	} while (remainder > 0);
out:
	return (CRYPTO_SUCCESS);
}
/* ARGSUSED */
int
gcm_encrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
	uint8_t *ghash, *macp = NULL;
	int i, rv;

	/* output must hold any final partial block plus the auth tag */
	if (out->cd_length <
	    (ctx->gcm_remainder_len + ctx->gcm_tag_len)) {
		return (CRYPTO_DATA_LEN_RANGE);
	}

	ghash = (uint8_t *)ctx->gcm_ghash;

	if (ctx->gcm_remainder_len > 0) {
		uint64_t counter;
		uint8_t *tmpp = (uint8_t *)ctx->gcm_tmp;

		/*
		 * Here is where we deal with data that is not a
		 * multiple of the block size.
		 */

		/*
		 * Increment counter.
		 */
		counter = ntohll(ctx->gcm_cb[1] & counter_mask);
		counter = htonll(counter + 1);
		counter &= counter_mask;
		ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;

		encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb,
		    (uint8_t *)ctx->gcm_tmp);

		/* zero-pad the tail of the partial block before hashing */
		macp = (uint8_t *)ctx->gcm_remainder;
		bzero(macp + ctx->gcm_remainder_len,
		    block_size - ctx->gcm_remainder_len);

		/* XOR with counter block */
		for (i = 0; i < ctx->gcm_remainder_len; i++) {
			macp[i] ^= tmpp[i];
		}

		/* add ciphertext to the hash */
		GHASH(ctx, macp, ghash);

		ctx->gcm_processed_data_len += ctx->gcm_remainder_len;
	}

	/* hash in the final length block (AAD and ciphertext bit lengths) */
	ctx->gcm_len_a_len_c[1] =
	    htonll(CRYPTO_BYTES2BITS(ctx->gcm_processed_data_len));
	GHASH(ctx, ctx->gcm_len_a_len_c, ghash);
	/* tag = GHASH result XOR E(K, J0) */
	encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_J0,
	    (uint8_t *)ctx->gcm_J0);
	xor_block((uint8_t *)ctx->gcm_J0, ghash);

	if (ctx->gcm_remainder_len > 0) {
		/* emit the final partial ciphertext block */
		rv = crypto_put_output_data(macp, out, ctx->gcm_remainder_len);
		if (rv != CRYPTO_SUCCESS)
			return (rv);
	}
	out->cd_offset += ctx->gcm_remainder_len;
	ctx->gcm_remainder_len = 0;
	/* append the authentication tag */
	rv = crypto_put_output_data(ghash, out, ctx->gcm_tag_len);
	if (rv != CRYPTO_SUCCESS)
		return (rv);
	out->cd_offset += ctx->gcm_tag_len;

	return (CRYPTO_SUCCESS);
}
/*
 * This will only deal with decrypting the last block of the input that
 * might not be a multiple of block length.  The partial ciphertext is
 * expected in ctx->gcm_remainder (gcm_remainder_len bytes) and the
 * plaintext is written into ctx->gcm_pt_buf at `index'.
 */
static void
gcm_decrypt_incomplete_block(gcm_ctx_t *ctx, size_t block_size, size_t index,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	uint8_t *datap, *outp, *counterp;
	uint64_t counter;
	uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
	int i;

	/*
	 * Increment counter.
	 * Counter bits are confined to the bottom 32 bits
	 */
	counter = ntohll(ctx->gcm_cb[1] & counter_mask);
	counter = htonll(counter + 1);
	counter &= counter_mask;
	ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;

	datap = (uint8_t *)ctx->gcm_remainder;	/* buffered ciphertext tail */
	outp = &((ctx->gcm_pt_buf)[index]);	/* plaintext destination */
	counterp = (uint8_t *)ctx->gcm_tmp;

	/* authentication tag */
	bzero((uint8_t *)ctx->gcm_tmp, block_size);
	bcopy(datap, (uint8_t *)ctx->gcm_tmp, ctx->gcm_remainder_len);

	/* add ciphertext to the hash */
	GHASH(ctx, ctx->gcm_tmp, ctx->gcm_ghash);

	/* decrypt remaining ciphertext */
	encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb, counterp);

	/* XOR with counter block */
	for (i = 0; i < ctx->gcm_remainder_len; i++) {
		outp[i] = datap[i] ^ counterp[i];
	}
}
/* ARGSUSED */
/*
 * Buffer incoming ciphertext for GCM decryption.  The actual decryption
 * and tag verification are deferred to gcm_decrypt_final(), so this just
 * grows ctx->gcm_pt_buf and appends the new data.
 *
 * Returns CRYPTO_SUCCESS, or CRYPTO_HOST_MEMORY if the buffer cannot
 * be grown (the existing buffer is left intact for the ctx owner to free).
 */
int
gcm_mode_decrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
    crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	size_t new_len;
	uint8_t *new;

	/*
	 * Copy contiguous ciphertext input blocks to plaintext buffer.
	 * Ciphertext will be decrypted in the final.
	 */
	if (length > 0) {
		new_len = ctx->gcm_pt_buf_len + length;
		new = vmem_alloc(new_len, ctx->gcm_kmflag);
		/*
		 * Check the allocation BEFORE touching `new' or freeing
		 * the old buffer; the original code bcopy()'d into and
		 * freed through a potentially NULL pointer first.
		 */
		if (new == NULL)
			return (CRYPTO_HOST_MEMORY);
		bcopy(ctx->gcm_pt_buf, new, ctx->gcm_pt_buf_len);
		vmem_free(ctx->gcm_pt_buf, ctx->gcm_pt_buf_len);
		ctx->gcm_pt_buf = new;
		ctx->gcm_pt_buf_len = new_len;
		bcopy(data, &ctx->gcm_pt_buf[ctx->gcm_processed_data_len],
		    length);
		ctx->gcm_processed_data_len += length;
	}

	ctx->gcm_remainder_len = 0;
	return (CRYPTO_SUCCESS);
}
/*
 * Decrypt the ciphertext accumulated in ctx->gcm_pt_buf (in place),
 * verify the trailing authentication tag, and release the plaintext
 * to `out' only if the tag matches.
 */
int
gcm_decrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	size_t pt_len;
	size_t remainder;
	uint8_t *ghash;
	uint8_t *blockp;
	uint8_t *cbp;
	uint64_t counter;
	uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
	int processed = 0, rv;

	ASSERT(ctx->gcm_processed_data_len == ctx->gcm_pt_buf_len);

	/* buffered input is ciphertext followed by the gcm_tag_len tag */
	pt_len = ctx->gcm_processed_data_len - ctx->gcm_tag_len;
	ghash = (uint8_t *)ctx->gcm_ghash;
	blockp = ctx->gcm_pt_buf;
	remainder = pt_len;
	while (remainder > 0) {
		/* Incomplete last block */
		if (remainder < block_size) {
			bcopy(blockp, ctx->gcm_remainder, remainder);
			ctx->gcm_remainder_len = remainder;
			/*
			 * not expecting anymore ciphertext, just
			 * compute plaintext for the remaining input
			 */
			gcm_decrypt_incomplete_block(ctx, block_size,
			    processed, encrypt_block, xor_block);
			ctx->gcm_remainder_len = 0;
			goto out;
		}
		/* add ciphertext to the hash */
		GHASH(ctx, blockp, ghash);

		/*
		 * Increment counter.
		 * Counter bits are confined to the bottom 32 bits
		 */
		counter = ntohll(ctx->gcm_cb[1] & counter_mask);
		counter = htonll(counter + 1);
		counter &= counter_mask;
		ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;

		cbp = (uint8_t *)ctx->gcm_tmp;
		encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb, cbp);

		/* XOR with ciphertext; decrypts the block in place */
		xor_block(cbp, blockp);
		processed += block_size;
		blockp += block_size;
		remainder -= block_size;
	}
out:
	/* finish GHASH with the length block and compute the expected tag */
	ctx->gcm_len_a_len_c[1] = htonll(CRYPTO_BYTES2BITS(pt_len));
	GHASH(ctx, ctx->gcm_len_a_len_c, ghash);
	encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_J0,
	    (uint8_t *)ctx->gcm_J0);
	xor_block((uint8_t *)ctx->gcm_J0, ghash);

	/*
	 * compare the input authentication tag with what we calculated
	 * NOTE(review): bcmp() is not a constant-time comparison; a
	 * timing-safe compare would be preferable for tag verification.
	 */
	if (bcmp(&ctx->gcm_pt_buf[pt_len], ghash, ctx->gcm_tag_len)) {
		/* They don't match */
		return (CRYPTO_INVALID_MAC);
	} else {
		/* tag verified; hand the plaintext to the caller */
		rv = crypto_put_output_data(ctx->gcm_pt_buf, out, pt_len);
		if (rv != CRYPTO_SUCCESS)
			return (rv);
		out->cd_offset += pt_len;
	}
	return (CRYPTO_SUCCESS);
}
/*
 * Sanity-check GCM mechanism parameters: the tag length (in bits) must
 * be one of the permitted sizes and a non-empty IV must be supplied.
 * Returns CRYPTO_SUCCESS or CRYPTO_MECHANISM_PARAM_INVALID.
 */
static int
gcm_validate_args(CK_AES_GCM_PARAMS *gcm_param)
{
	static const size_t valid_tag_bits[] = {
		32, 64, 96, 104, 112, 120, 128
	};
	size_t bits = gcm_param->ulTagBits;
	int i, valid = 0;

	for (i = 0;
	    i < (int)(sizeof (valid_tag_bits) / sizeof (valid_tag_bits[0]));
	    i++) {
		if (bits == valid_tag_bits[i]) {
			valid = 1;
			break;
		}
	}
	if (!valid)
		return (CRYPTO_MECHANISM_PARAM_INVALID);

	if (gcm_param->ulIvLen == 0)
		return (CRYPTO_MECHANISM_PARAM_INVALID);

	return (CRYPTO_SUCCESS);
}
/*
 * Derive the initial counter block J0 from the IV.  A 12-byte IV is
 * used directly with a trailing counter byte of 1; any other IV length
 * is run through GHASH together with its bit length.
 */
static void
gcm_format_initial_blocks(uchar_t *iv, ulong_t iv_len,
    gcm_ctx_t *ctx, size_t block_size,
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	uint8_t *cb;
	ulong_t remainder = iv_len;
	ulong_t processed = 0;
	uint8_t *datap, *ghash;
	uint64_t len_a_len_c[2];

	ghash = (uint8_t *)ctx->gcm_ghash;
	cb = (uint8_t *)ctx->gcm_cb;
	if (iv_len == 12) {
		/* J0 = IV || 0^31 || 1 */
		bcopy(iv, cb, 12);
		cb[12] = 0;
		cb[13] = 0;
		cb[14] = 0;
		cb[15] = 1;
		/* J0 will be used again in the final */
		copy_block(cb, (uint8_t *)ctx->gcm_J0);
	} else {
		/* GHASH the IV */
		do {
			if (remainder < block_size) {
				/* zero-pad the last partial IV block */
				bzero(cb, block_size);
				bcopy(&(iv[processed]), cb, remainder);
				datap = (uint8_t *)cb;
				remainder = 0;
			} else {
				datap = (uint8_t *)(&(iv[processed]));
				processed += block_size;
				remainder -= block_size;
			}
			GHASH(ctx, datap, ghash);
		} while (remainder > 0);

		/* finish with a length block: 0^64 || len(IV) in bits */
		len_a_len_c[0] = 0;
		len_a_len_c[1] = htonll(CRYPTO_BYTES2BITS(iv_len));
		GHASH(ctx, len_a_len_c, ctx->gcm_J0);

		/* J0 will be used again in the final */
		copy_block((uint8_t *)ctx->gcm_J0, (uint8_t *)cb);
	}
}
/*
 * The following function is called at encrypt or decrypt init time
 * for AES GCM mode.
 *
 * Computes the hash subkey H = E(K, 0^128), derives the initial counter
 * block J0 from the IV, and folds the additional authenticated data
 * (AAD) into the running GHASH.
 */
int
gcm_init(gcm_ctx_t *ctx, unsigned char *iv, size_t iv_len,
    unsigned char *auth_data, size_t auth_data_len, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	uint8_t *ghash, *datap, *authp;
	size_t remainder, processed;

	/* encrypt zero block to get subkey H */
	bzero(ctx->gcm_H, sizeof (ctx->gcm_H));
	encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_H,
	    (uint8_t *)ctx->gcm_H);

	gcm_format_initial_blocks(iv, iv_len, ctx, block_size,
	    copy_block, xor_block);

	authp = (uint8_t *)ctx->gcm_tmp;
	ghash = (uint8_t *)ctx->gcm_ghash;
	bzero(authp, block_size);
	bzero(ghash, block_size);

	processed = 0;
	remainder = auth_data_len;
	do {
		if (remainder < block_size) {
			/*
			 * There's not a block full of data, pad rest of
			 * buffer with zero
			 */
			bzero(authp, block_size);
			bcopy(&(auth_data[processed]), authp, remainder);
			datap = (uint8_t *)authp;
			remainder = 0;
		} else {
			datap = (uint8_t *)(&(auth_data[processed]));
			processed += block_size;
			remainder -= block_size;
		}
		/* add auth data to the hash */
		GHASH(ctx, datap, ghash);
	} while (remainder > 0);

	return (CRYPTO_SUCCESS);
}
/*
 * Initialize a GCM context from caller-supplied CK_AES_GCM_PARAMS:
 * validate the parameters, record the tag length (converted from bits
 * to bytes) and AAD bit length, then run the common gcm_init() setup.
 */
int
gcm_init_ctx(gcm_ctx_t *gcm_ctx, char *param, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	CK_AES_GCM_PARAMS *gcm_param;
	int rv;

	/* GCM cannot be initialized without mechanism parameters */
	if (param == NULL)
		return (CRYPTO_MECHANISM_PARAM_INVALID);

	gcm_param = (CK_AES_GCM_PARAMS *)(void *)param;
	rv = gcm_validate_args(gcm_param);
	if (rv != 0)
		return (rv);

	/* tag length arrives in bits; keep it in bytes */
	gcm_ctx->gcm_tag_len = gcm_param->ulTagBits >> 3;
	gcm_ctx->gcm_processed_data_len = 0;

	/* these values are in bits */
	gcm_ctx->gcm_len_a_len_c[0]
	    = htonll(CRYPTO_BYTES2BITS(gcm_param->ulAADLen));

	gcm_ctx->gcm_flags |= GCM_MODE;

	if (gcm_init(gcm_ctx, gcm_param->pIv, gcm_param->ulIvLen,
	    gcm_param->pAAD, gcm_param->ulAADLen, block_size,
	    encrypt_block, copy_block, xor_block) != 0)
		return (CRYPTO_MECHANISM_PARAM_INVALID);

	return (CRYPTO_SUCCESS);
}
/*
 * Initialize a GMAC context from caller-supplied CK_AES_GMAC_PARAMS.
 * GMAC is GCM with a fixed tag size and IV length and no ciphertext,
 * so this records the fixed tag length and AAD bit length, then runs
 * the common gcm_init() setup.
 */
int
gmac_init_ctx(gcm_ctx_t *gcm_ctx, char *param, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	CK_AES_GMAC_PARAMS *gmac_param;

	/* GMAC cannot be initialized without mechanism parameters */
	if (param == NULL)
		return (CRYPTO_MECHANISM_PARAM_INVALID);

	gmac_param = (CK_AES_GMAC_PARAMS *)(void *)param;

	/* GMAC uses a fixed tag size */
	gcm_ctx->gcm_tag_len = CRYPTO_BITS2BYTES(AES_GMAC_TAG_BITS);
	gcm_ctx->gcm_processed_data_len = 0;

	/* these values are in bits */
	gcm_ctx->gcm_len_a_len_c[0]
	    = htonll(CRYPTO_BYTES2BITS(gmac_param->ulAADLen));

	gcm_ctx->gcm_flags |= GMAC_MODE;

	if (gcm_init(gcm_ctx, gmac_param->pIv, AES_GMAC_IV_LEN,
	    gmac_param->pAAD, gmac_param->ulAADLen, block_size,
	    encrypt_block, copy_block, xor_block) != 0)
		return (CRYPTO_MECHANISM_PARAM_INVALID);

	return (CRYPTO_SUCCESS);
}
void *
gcm_alloc_ctx(int kmflag)
{
gcm_ctx_t *gcm_ctx;
if ((gcm_ctx = kmem_zalloc(sizeof (gcm_ctx_t), kmflag)) == NULL)
return (NULL);
gcm_ctx->gcm_flags = GCM_MODE;
return (gcm_ctx);
}
void *
gmac_alloc_ctx(int kmflag)
{
gcm_ctx_t *gcm_ctx;
if ((gcm_ctx = kmem_zalloc(sizeof (gcm_ctx_t), kmflag)) == NULL)
return (NULL);
gcm_ctx->gcm_flags = GMAC_MODE;
return (gcm_ctx);
}
/*
 * Record the allocation flag later passed to vmem_alloc() when the
 * decrypt path grows ctx->gcm_pt_buf.
 */
void
gcm_set_kmflag(gcm_ctx_t *ctx, int kmflag)
{
	ctx->gcm_kmflag = kmflag;
}
#ifdef __amd64
#define INTEL_PCLMULQDQ_FLAG (1 << 1)
/*
* Return 1 if executing on Intel with PCLMULQDQ instructions,
* otherwise 0 (i.e., Intel without PCLMULQDQ or AMD64).
* Cache the result, as the CPU can't change.
*
* Note: the userland version uses getisax(). The kernel version uses
* is_x86_featureset().
*/
static int
intel_pclmulqdq_instruction_present(void)
{
	static int cached_result = -1;	/* -1 = not yet probed */
	unsigned eax, ebx, ecx, edx;
	unsigned func, subfunc;

	if (cached_result == -1) { /* first time */
		/* check for an intel cpu */
		func = 0;
		subfunc = 0;

		/* CPUID leaf 0: vendor string returned in EBX:EDX:ECX */
		__asm__ __volatile__(
		    "cpuid"
		    : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
		    : "a"(func), "c"(subfunc));

		if (memcmp((char *) (&ebx), "Genu", 4) == 0 &&
		    memcmp((char *) (&edx), "ineI", 4) == 0 &&
		    memcmp((char *) (&ecx), "ntel", 4) == 0) {
			func = 1;
			subfunc = 0;

			/*
			 * CPUID leaf 1: the PCLMULQDQ feature flag is
			 * ECX bit 1 (INTEL_PCLMULQDQ_FLAG).  (The old
			 * comment here said "aes-ni", which was wrong.)
			 */
			__asm__ __volatile__(
			    "cpuid"
			    : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
			    : "a"(func), "c"(subfunc));

			cached_result = !!(ecx & INTEL_PCLMULQDQ_FLAG);
		} else {
			cached_result = 0;
		}
	}

	return (cached_result);
}
#endif /* __amd64 */

View File

@ -0,0 +1,159 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <sys/zfs_context.h>
#include <modes/modes.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
/*
* Initialize by setting iov_or_mp to point to the current iovec or mp,
* and by setting current_offset to an offset within the current iovec or mp.
*/
void
crypto_init_ptrs(crypto_data_t *out, void **iov_or_mp, offset_t *current_offset)
{
	offset_t offset;

	switch (out->cd_format) {
	case CRYPTO_DATA_RAW:
		/* single flat buffer: just remember the byte offset */
		*current_offset = out->cd_offset;
		break;

	case CRYPTO_DATA_UIO: {
		uio_t *uiop = out->cd_uio;
		uintptr_t vec_idx;

		/* skip whole iovecs until the one containing cd_offset */
		offset = out->cd_offset;
		for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
		    offset >= uiop->uio_iov[vec_idx].iov_len;
		    offset -= uiop->uio_iov[vec_idx++].iov_len)
			;

		*current_offset = offset;
		*iov_or_mp = (void *)vec_idx;
		break;
	}
	} /* end switch */
}
/*
* Get pointers for where in the output to copy a block of encrypted or
* decrypted data. The iov_or_mp argument stores a pointer to the current
* iovec or mp, and offset stores an offset into the current iovec or mp.
*/
void
crypto_get_ptrs(crypto_data_t *out, void **iov_or_mp, offset_t *current_offset,
    uint8_t **out_data_1, size_t *out_data_1_len, uint8_t **out_data_2,
    size_t amt)
{
	offset_t offset;

	switch (out->cd_format) {
	case CRYPTO_DATA_RAW: {
		iovec_t *iov;

		offset = *current_offset;
		iov = &out->cd_raw;
		if ((offset + amt) <= iov->iov_len) {
			/* one block fits */
			*out_data_1 = (uint8_t *)iov->iov_base + offset;
			*out_data_1_len = amt;
			*out_data_2 = NULL;
			*current_offset = offset + amt;
		}
		/*
		 * NOTE(review): when the block does NOT fit, the output
		 * pointers are left unmodified here — presumably callers
		 * size cd_raw to whole blocks; confirm before relying on it.
		 */
		break;
	}

	case CRYPTO_DATA_UIO: {
		uio_t *uio = out->cd_uio;
		iovec_t *iov;
		offset_t offset;
		uintptr_t vec_idx;
		uint8_t *p;

		offset = *current_offset;
		vec_idx = (uintptr_t)(*iov_or_mp);
		iov = (iovec_t *)&uio->uio_iov[vec_idx];
		p = (uint8_t *)iov->iov_base + offset;
		*out_data_1 = p;

		if (offset + amt <= iov->iov_len) {
			/* can fit one block into this iov */
			*out_data_1_len = amt;
			*out_data_2 = NULL;
			*current_offset = offset + amt;
		} else {
			/* one block spans two iovecs */
			*out_data_1_len = iov->iov_len - offset;
			if (vec_idx == uio->uio_iovcnt)
				return;
			vec_idx++;
			iov = (iovec_t *)&uio->uio_iov[vec_idx];
			*out_data_2 = (uint8_t *)iov->iov_base;
			*current_offset = amt - *out_data_1_len;
		}
		*iov_or_mp = (void *)vec_idx;
		break;
	}
	} /* end switch */
}
void
crypto_free_mode_ctx(void *ctx)
{
common_ctx_t *common_ctx = (common_ctx_t *)ctx;
switch (common_ctx->cc_flags &
(ECB_MODE|CBC_MODE|CTR_MODE|CCM_MODE|GCM_MODE|GMAC_MODE)) {
case ECB_MODE:
kmem_free(common_ctx, sizeof (ecb_ctx_t));
break;
case CBC_MODE:
kmem_free(common_ctx, sizeof (cbc_ctx_t));
break;
case CTR_MODE:
kmem_free(common_ctx, sizeof (ctr_ctx_t));
break;
case CCM_MODE:
if (((ccm_ctx_t *)ctx)->ccm_pt_buf != NULL)
vmem_free(((ccm_ctx_t *)ctx)->ccm_pt_buf,
((ccm_ctx_t *)ctx)->ccm_data_len);
kmem_free(ctx, sizeof (ccm_ctx_t));
break;
case GCM_MODE:
case GMAC_MODE:
if (((gcm_ctx_t *)ctx)->gcm_pt_buf != NULL)
vmem_free(((gcm_ctx_t *)ctx)->gcm_pt_buf,
((gcm_ctx_t *)ctx)->gcm_pt_buf_len);
kmem_free(ctx, sizeof (gcm_ctx_t));
}
}

663
module/icp/algs/sha1/sha1.c Normal file
View File

@ -0,0 +1,663 @@
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* The basic framework for this code came from the reference
* implementation for MD5. That implementation is Copyright (C)
* 1991-2, RSA Data Security, Inc. Created 1991. All rights reserved.
*
* License to copy and use this software is granted provided that it
* is identified as the "RSA Data Security, Inc. MD5 Message-Digest
* Algorithm" in all material mentioning or referencing this software
* or this function.
*
* License is also granted to make and use derivative works provided
* that such works are identified as "derived from the RSA Data
* Security, Inc. MD5 Message-Digest Algorithm" in all material
* mentioning or referencing the derived work.
*
* RSA Data Security, Inc. makes no representations concerning either
* the merchantability of this software or the suitability of this
* software for any particular purpose. It is provided "as is"
* without express or implied warranty of any kind.
*
* These notices must be retained in any copies of any part of this
* documentation and/or software.
*
* NOTE: Cleaned-up and optimized, version of SHA1, based on the FIPS 180-1
* standard, available at http://www.itl.nist.gov/fipspubs/fip180-1.htm
* Not as fast as one would like -- further optimizations are encouraged
* and appreciated.
*/
#include <sys/zfs_context.h>
#include <sha1/sha1.h>
#include <sha1/sha1_consts.h>
#ifdef _LITTLE_ENDIAN
#include <sys/byteorder.h>
#define HAVE_HTONL
#endif
#define _RESTRICT_KYWD
static void Encode(uint8_t *, const uint32_t *, size_t);
#if defined(__amd64)
#define SHA1_TRANSFORM(ctx, in) sha1_block_data_order((ctx), (in), 1)
#define SHA1_TRANSFORM_BLOCKS(ctx, in, num) sha1_block_data_order((ctx), \
(in), (num))
void sha1_block_data_order(SHA1_CTX *ctx, const void *inpp, size_t num_blocks);
#else
#define SHA1_TRANSFORM(ctx, in) SHA1Transform((ctx), (in))
static void SHA1Transform(SHA1_CTX *, const uint8_t *);
#endif
static uint8_t PADDING[64] = { 0x80, /* all zeros */ };
/*
* F, G, and H are the basic SHA1 functions.
*/
#define F(b, c, d) (((b) & (c)) | ((~b) & (d)))
#define G(b, c, d) ((b) ^ (c) ^ (d))
#define H(b, c, d) (((b) & (c)) | (((b)|(c)) & (d)))
/*
* ROTATE_LEFT rotates x left n bits.
*/
#if defined(__GNUC__) && defined(_LP64)
static __inline__ uint64_t
ROTATE_LEFT(uint64_t value, uint32_t n)
{
uint32_t t32;
t32 = (uint32_t)value;
return ((t32 << n) | (t32 >> (32 - n)));
}
#else
#define ROTATE_LEFT(x, n) \
(((x) << (n)) | ((x) >> ((sizeof (x) * NBBY)-(n))))
#endif
/*
 * SHA1Init()
 *
 * purpose: initializes the sha1 context and begins an sha1 digest operation
 * input:  SHA1_CTX * : the context to initialize.
 * output: void
 */
void
SHA1Init(SHA1_CTX *ctx)
{
	/* FIPS 180-1 initial hash values (unsigned constants). */
	ctx->state[0] = 0x67452301U;
	ctx->state[1] = 0xefcdab89U;
	ctx->state[2] = 0x98badcfeU;
	ctx->state[3] = 0x10325476U;
	ctx->state[4] = 0xc3d2e1f0U;

	/* no message bits processed yet */
	ctx->count[0] = 0;
	ctx->count[1] = 0;
}
/*
 * SHA1Update()
 *
 * purpose: continues an sha1 digest operation, using the message block
 *          to update the context.
 * input:   SHA1_CTX *	: the context to update
 *          void *	: the message block
 *          size_t	: the length of the message block, in bytes
 * output:  void
 */
void
SHA1Update(SHA1_CTX *ctx, const void *inptr, size_t input_len)
{
	uint32_t i, buf_index, buf_len;
	const uint8_t *input = inptr;
#if defined(__amd64)
	uint32_t block_count;
#endif	/* __amd64 */

	/* check for noop */
	if (input_len == 0)
		return;

	/* compute number of bytes mod 64 */
	buf_index = (ctx->count[1] >> 3) & 0x3F;

	/* update number of bits (64-bit count kept in two 32-bit words) */
	if ((ctx->count[1] += (input_len << 3)) < (input_len << 3))
		ctx->count[0]++;	/* low word wrapped; carry */

	ctx->count[0] += (input_len >> 29);

	buf_len = 64 - buf_index;

	/* transform as many times as possible */
	i = 0;
	if (input_len >= buf_len) {
		/*
		 * general optimization:
		 *
		 * only do initial bcopy() and SHA1Transform() if
		 * buf_index != 0. if buf_index == 0, we're just
		 * wasting our time doing the bcopy() since there
		 * wasn't any data left over from a previous call to
		 * SHA1Update().
		 */
		if (buf_index) {
			bcopy(input, &ctx->buf_un.buf8[buf_index], buf_len);
			SHA1_TRANSFORM(ctx, ctx->buf_un.buf8);
			i = buf_len;
		}

#if !defined(__amd64)
		for (; i + 63 < input_len; i += 64)
			SHA1_TRANSFORM(ctx, &input[i]);
#else
		/* amd64: hash all remaining whole blocks in one call */
		block_count = (input_len - i) >> 6;
		if (block_count > 0) {
			SHA1_TRANSFORM_BLOCKS(ctx, &input[i], block_count);
			i += block_count << 6;
		}
#endif	/* !__amd64 */

		/*
		 * general optimization:
		 *
		 * if i and input_len are the same, return now instead
		 * of calling bcopy(), since the bcopy() in this case
		 * will be an expensive nop.
		 */
		if (input_len == i)
			return;

		buf_index = 0;
	}

	/* buffer remaining input */
	bcopy(&input[i], &ctx->buf_un.buf8[buf_index], input_len - i);
}
/*
* SHA1Final()
*
* purpose: ends an sha1 digest operation, finalizing the message digest and
* zeroing the context.
* input: uchar_t * : A buffer to store the digest.
* : The function actually uses void* because many
* : callers pass things other than uchar_t here.
* SHA1_CTX * : the context to finalize, save, and zero
* output: void
*/
void
SHA1Final(void *digest, SHA1_CTX *ctx)
{
	uint8_t bitcount_be[sizeof (ctx->count)];
	uint32_t index = (ctx->count[1] >> 3) & 0x3f;

	/* store bit count, big endian */
	Encode(bitcount_be, ctx->count, sizeof (bitcount_be));

	/* pad out to 56 mod 64; 120 forces an extra block when index >= 56 */
	SHA1Update(ctx, PADDING, ((index < 56) ? 56 : 120) - index);

	/* append length (before padding) */
	SHA1Update(ctx, bitcount_be, sizeof (bitcount_be));

	/* store state in digest */
	Encode(digest, ctx->state, sizeof (ctx->state));

	/* zeroize sensitive information */
	bzero(ctx, sizeof (*ctx));
}
#if !defined(__amd64)
typedef uint32_t sha1word;
/*
* sparc optimization:
*
* on the sparc, we can load big endian 32-bit data easily. note that
* special care must be taken to ensure the address is 32-bit aligned.
* in the interest of speed, we don't check to make sure, since
* careful programming can guarantee this for us.
*/
#if defined(_BIG_ENDIAN)
#define LOAD_BIG_32(addr) (*(uint32_t *)(addr))
#elif defined(HAVE_HTONL)
#define LOAD_BIG_32(addr) htonl(*((uint32_t *)(addr)))
#else
/* little endian -- will work on big endian, but slowly */
#define LOAD_BIG_32(addr) \
(((addr)[0] << 24) | ((addr)[1] << 16) | ((addr)[2] << 8) | (addr)[3])
#endif /* _BIG_ENDIAN */
/*
* SHA1Transform()
*/
#if defined(W_ARRAY)
#define W(n) w[n]
#else /* !defined(W_ARRAY) */
#define W(n) w_ ## n
#endif /* !defined(W_ARRAY) */
/*
 * SHA1Transform()
 *
 * purpose: mixes one 64-byte block of message data into the five-word
 *          SHA-1 chaining state (generic C path, compiled only when the
 *          amd64 assembly implementation is not used).
 *   input: SHA1_CTX *	: the context whose ->state[] is updated in place
 *          blk		: 64 bytes of message data, loaded big-endian
 *  output: void
 */
void /* CSTYLED */
SHA1Transform(SHA1_CTX *ctx, const uint8_t blk[64])
{
/* CSTYLED */
sha1word a = ctx->state[0];
sha1word b = ctx->state[1];
sha1word c = ctx->state[2];
sha1word d = ctx->state[3];
sha1word e = ctx->state[4];
#if defined(W_ARRAY)
sha1word w[16];
#else /* !defined(W_ARRAY) */
sha1word w_0, w_1, w_2, w_3, w_4, w_5, w_6, w_7;
sha1word w_8, w_9, w_10, w_11, w_12, w_13, w_14, w_15;
#endif /* !defined(W_ARRAY) */
/* Load the 16 message words, converting from big-endian byte order. */
W(0) = LOAD_BIG_32((void *)(blk + 0));
W(1) = LOAD_BIG_32((void *)(blk + 4));
W(2) = LOAD_BIG_32((void *)(blk + 8));
W(3) = LOAD_BIG_32((void *)(blk + 12));
W(4) = LOAD_BIG_32((void *)(blk + 16));
W(5) = LOAD_BIG_32((void *)(blk + 20));
W(6) = LOAD_BIG_32((void *)(blk + 24));
W(7) = LOAD_BIG_32((void *)(blk + 28));
W(8) = LOAD_BIG_32((void *)(blk + 32));
W(9) = LOAD_BIG_32((void *)(blk + 36));
W(10) = LOAD_BIG_32((void *)(blk + 40));
W(11) = LOAD_BIG_32((void *)(blk + 44));
W(12) = LOAD_BIG_32((void *)(blk + 48));
W(13) = LOAD_BIG_32((void *)(blk + 52));
W(14) = LOAD_BIG_32((void *)(blk + 56));
W(15) = LOAD_BIG_32((void *)(blk + 60));
/*
 * general optimization:
 *
 * even though this approach is described in the standard as
 * being slower algorithmically, it is 30-40% faster than the
 * "faster" version under SPARC, because this version has more
 * of the constraints specified at compile-time and uses fewer
 * variables (and therefore has better register utilization)
 * than its "speedier" brother. (i've tried both, trust me)
 *
 * for either method given in the spec, there is an "assignment"
 * phase where the following takes place:
 *
 * tmp = (main_computation);
 * e = d; d = c; c = rotate_left(b, 30); b = a; a = tmp;
 *
 * we can make the algorithm go faster by not doing this work,
 * but just pretending that `d' is now `e', etc. this works
 * really well and obviates the need for a temporary variable.
 * however, we still explicitly perform the rotate action,
 * since it is cheaper on SPARC to do it once than to have to
 * do it over and over again.
 */
/* round 1 */
e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + W(0) + SHA1_CONST(0); /* 0 */
b = ROTATE_LEFT(b, 30);
d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + W(1) + SHA1_CONST(0); /* 1 */
a = ROTATE_LEFT(a, 30);
c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + W(2) + SHA1_CONST(0); /* 2 */
e = ROTATE_LEFT(e, 30);
b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + W(3) + SHA1_CONST(0); /* 3 */
d = ROTATE_LEFT(d, 30);
a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + W(4) + SHA1_CONST(0); /* 4 */
c = ROTATE_LEFT(c, 30);
e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + W(5) + SHA1_CONST(0); /* 5 */
b = ROTATE_LEFT(b, 30);
d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + W(6) + SHA1_CONST(0); /* 6 */
a = ROTATE_LEFT(a, 30);
c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + W(7) + SHA1_CONST(0); /* 7 */
e = ROTATE_LEFT(e, 30);
b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + W(8) + SHA1_CONST(0); /* 8 */
d = ROTATE_LEFT(d, 30);
a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + W(9) + SHA1_CONST(0); /* 9 */
c = ROTATE_LEFT(c, 30);
e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + W(10) + SHA1_CONST(0); /* 10 */
b = ROTATE_LEFT(b, 30);
d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + W(11) + SHA1_CONST(0); /* 11 */
a = ROTATE_LEFT(a, 30);
c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + W(12) + SHA1_CONST(0); /* 12 */
e = ROTATE_LEFT(e, 30);
b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + W(13) + SHA1_CONST(0); /* 13 */
d = ROTATE_LEFT(d, 30);
a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + W(14) + SHA1_CONST(0); /* 14 */
c = ROTATE_LEFT(c, 30);
e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + W(15) + SHA1_CONST(0); /* 15 */
b = ROTATE_LEFT(b, 30);
/* from round 16 on, each W(t) is expanded from earlier schedule words */
W(0) = ROTATE_LEFT((W(13) ^ W(8) ^ W(2) ^ W(0)), 1); /* 16 */
d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + W(0) + SHA1_CONST(0);
a = ROTATE_LEFT(a, 30);
W(1) = ROTATE_LEFT((W(14) ^ W(9) ^ W(3) ^ W(1)), 1); /* 17 */
c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + W(1) + SHA1_CONST(0);
e = ROTATE_LEFT(e, 30);
W(2) = ROTATE_LEFT((W(15) ^ W(10) ^ W(4) ^ W(2)), 1); /* 18 */
b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + W(2) + SHA1_CONST(0);
d = ROTATE_LEFT(d, 30);
W(3) = ROTATE_LEFT((W(0) ^ W(11) ^ W(5) ^ W(3)), 1); /* 19 */
a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + W(3) + SHA1_CONST(0);
c = ROTATE_LEFT(c, 30);
/* round 2 */
W(4) = ROTATE_LEFT((W(1) ^ W(12) ^ W(6) ^ W(4)), 1); /* 20 */
e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(4) + SHA1_CONST(1);
b = ROTATE_LEFT(b, 30);
W(5) = ROTATE_LEFT((W(2) ^ W(13) ^ W(7) ^ W(5)), 1); /* 21 */
d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(5) + SHA1_CONST(1);
a = ROTATE_LEFT(a, 30);
W(6) = ROTATE_LEFT((W(3) ^ W(14) ^ W(8) ^ W(6)), 1); /* 22 */
c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(6) + SHA1_CONST(1);
e = ROTATE_LEFT(e, 30);
W(7) = ROTATE_LEFT((W(4) ^ W(15) ^ W(9) ^ W(7)), 1); /* 23 */
b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(7) + SHA1_CONST(1);
d = ROTATE_LEFT(d, 30);
W(8) = ROTATE_LEFT((W(5) ^ W(0) ^ W(10) ^ W(8)), 1); /* 24 */
a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(8) + SHA1_CONST(1);
c = ROTATE_LEFT(c, 30);
W(9) = ROTATE_LEFT((W(6) ^ W(1) ^ W(11) ^ W(9)), 1); /* 25 */
e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(9) + SHA1_CONST(1);
b = ROTATE_LEFT(b, 30);
W(10) = ROTATE_LEFT((W(7) ^ W(2) ^ W(12) ^ W(10)), 1); /* 26 */
d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(10) + SHA1_CONST(1);
a = ROTATE_LEFT(a, 30);
W(11) = ROTATE_LEFT((W(8) ^ W(3) ^ W(13) ^ W(11)), 1); /* 27 */
c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(11) + SHA1_CONST(1);
e = ROTATE_LEFT(e, 30);
W(12) = ROTATE_LEFT((W(9) ^ W(4) ^ W(14) ^ W(12)), 1); /* 28 */
b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(12) + SHA1_CONST(1);
d = ROTATE_LEFT(d, 30);
W(13) = ROTATE_LEFT((W(10) ^ W(5) ^ W(15) ^ W(13)), 1); /* 29 */
a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(13) + SHA1_CONST(1);
c = ROTATE_LEFT(c, 30);
W(14) = ROTATE_LEFT((W(11) ^ W(6) ^ W(0) ^ W(14)), 1); /* 30 */
e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(14) + SHA1_CONST(1);
b = ROTATE_LEFT(b, 30);
W(15) = ROTATE_LEFT((W(12) ^ W(7) ^ W(1) ^ W(15)), 1); /* 31 */
d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(15) + SHA1_CONST(1);
a = ROTATE_LEFT(a, 30);
W(0) = ROTATE_LEFT((W(13) ^ W(8) ^ W(2) ^ W(0)), 1); /* 32 */
c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(0) + SHA1_CONST(1);
e = ROTATE_LEFT(e, 30);
W(1) = ROTATE_LEFT((W(14) ^ W(9) ^ W(3) ^ W(1)), 1); /* 33 */
b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(1) + SHA1_CONST(1);
d = ROTATE_LEFT(d, 30);
W(2) = ROTATE_LEFT((W(15) ^ W(10) ^ W(4) ^ W(2)), 1); /* 34 */
a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(2) + SHA1_CONST(1);
c = ROTATE_LEFT(c, 30);
W(3) = ROTATE_LEFT((W(0) ^ W(11) ^ W(5) ^ W(3)), 1); /* 35 */
e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(3) + SHA1_CONST(1);
b = ROTATE_LEFT(b, 30);
W(4) = ROTATE_LEFT((W(1) ^ W(12) ^ W(6) ^ W(4)), 1); /* 36 */
d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(4) + SHA1_CONST(1);
a = ROTATE_LEFT(a, 30);
W(5) = ROTATE_LEFT((W(2) ^ W(13) ^ W(7) ^ W(5)), 1); /* 37 */
c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(5) + SHA1_CONST(1);
e = ROTATE_LEFT(e, 30);
W(6) = ROTATE_LEFT((W(3) ^ W(14) ^ W(8) ^ W(6)), 1); /* 38 */
b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(6) + SHA1_CONST(1);
d = ROTATE_LEFT(d, 30);
W(7) = ROTATE_LEFT((W(4) ^ W(15) ^ W(9) ^ W(7)), 1); /* 39 */
a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(7) + SHA1_CONST(1);
c = ROTATE_LEFT(c, 30);
/* round 3 */
W(8) = ROTATE_LEFT((W(5) ^ W(0) ^ W(10) ^ W(8)), 1); /* 40 */
e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + W(8) + SHA1_CONST(2);
b = ROTATE_LEFT(b, 30);
W(9) = ROTATE_LEFT((W(6) ^ W(1) ^ W(11) ^ W(9)), 1); /* 41 */
d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + W(9) + SHA1_CONST(2);
a = ROTATE_LEFT(a, 30);
W(10) = ROTATE_LEFT((W(7) ^ W(2) ^ W(12) ^ W(10)), 1); /* 42 */
c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + W(10) + SHA1_CONST(2);
e = ROTATE_LEFT(e, 30);
W(11) = ROTATE_LEFT((W(8) ^ W(3) ^ W(13) ^ W(11)), 1); /* 43 */
b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + W(11) + SHA1_CONST(2);
d = ROTATE_LEFT(d, 30);
W(12) = ROTATE_LEFT((W(9) ^ W(4) ^ W(14) ^ W(12)), 1); /* 44 */
a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + W(12) + SHA1_CONST(2);
c = ROTATE_LEFT(c, 30);
W(13) = ROTATE_LEFT((W(10) ^ W(5) ^ W(15) ^ W(13)), 1); /* 45 */
e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + W(13) + SHA1_CONST(2);
b = ROTATE_LEFT(b, 30);
W(14) = ROTATE_LEFT((W(11) ^ W(6) ^ W(0) ^ W(14)), 1); /* 46 */
d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + W(14) + SHA1_CONST(2);
a = ROTATE_LEFT(a, 30);
W(15) = ROTATE_LEFT((W(12) ^ W(7) ^ W(1) ^ W(15)), 1); /* 47 */
c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + W(15) + SHA1_CONST(2);
e = ROTATE_LEFT(e, 30);
W(0) = ROTATE_LEFT((W(13) ^ W(8) ^ W(2) ^ W(0)), 1); /* 48 */
b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + W(0) + SHA1_CONST(2);
d = ROTATE_LEFT(d, 30);
W(1) = ROTATE_LEFT((W(14) ^ W(9) ^ W(3) ^ W(1)), 1); /* 49 */
a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + W(1) + SHA1_CONST(2);
c = ROTATE_LEFT(c, 30);
W(2) = ROTATE_LEFT((W(15) ^ W(10) ^ W(4) ^ W(2)), 1); /* 50 */
e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + W(2) + SHA1_CONST(2);
b = ROTATE_LEFT(b, 30);
W(3) = ROTATE_LEFT((W(0) ^ W(11) ^ W(5) ^ W(3)), 1); /* 51 */
d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + W(3) + SHA1_CONST(2);
a = ROTATE_LEFT(a, 30);
W(4) = ROTATE_LEFT((W(1) ^ W(12) ^ W(6) ^ W(4)), 1); /* 52 */
c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + W(4) + SHA1_CONST(2);
e = ROTATE_LEFT(e, 30);
W(5) = ROTATE_LEFT((W(2) ^ W(13) ^ W(7) ^ W(5)), 1); /* 53 */
b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + W(5) + SHA1_CONST(2);
d = ROTATE_LEFT(d, 30);
W(6) = ROTATE_LEFT((W(3) ^ W(14) ^ W(8) ^ W(6)), 1); /* 54 */
a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + W(6) + SHA1_CONST(2);
c = ROTATE_LEFT(c, 30);
W(7) = ROTATE_LEFT((W(4) ^ W(15) ^ W(9) ^ W(7)), 1); /* 55 */
e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + W(7) + SHA1_CONST(2);
b = ROTATE_LEFT(b, 30);
W(8) = ROTATE_LEFT((W(5) ^ W(0) ^ W(10) ^ W(8)), 1); /* 56 */
d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + W(8) + SHA1_CONST(2);
a = ROTATE_LEFT(a, 30);
W(9) = ROTATE_LEFT((W(6) ^ W(1) ^ W(11) ^ W(9)), 1); /* 57 */
c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + W(9) + SHA1_CONST(2);
e = ROTATE_LEFT(e, 30);
W(10) = ROTATE_LEFT((W(7) ^ W(2) ^ W(12) ^ W(10)), 1); /* 58 */
b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + W(10) + SHA1_CONST(2);
d = ROTATE_LEFT(d, 30);
W(11) = ROTATE_LEFT((W(8) ^ W(3) ^ W(13) ^ W(11)), 1); /* 59 */
a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + W(11) + SHA1_CONST(2);
c = ROTATE_LEFT(c, 30);
/* round 4 (uses the same parity function G() as round 2, per FIPS 180) */
W(12) = ROTATE_LEFT((W(9) ^ W(4) ^ W(14) ^ W(12)), 1); /* 60 */
e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(12) + SHA1_CONST(3);
b = ROTATE_LEFT(b, 30);
W(13) = ROTATE_LEFT((W(10) ^ W(5) ^ W(15) ^ W(13)), 1); /* 61 */
d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(13) + SHA1_CONST(3);
a = ROTATE_LEFT(a, 30);
W(14) = ROTATE_LEFT((W(11) ^ W(6) ^ W(0) ^ W(14)), 1); /* 62 */
c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(14) + SHA1_CONST(3);
e = ROTATE_LEFT(e, 30);
W(15) = ROTATE_LEFT((W(12) ^ W(7) ^ W(1) ^ W(15)), 1); /* 63 */
b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(15) + SHA1_CONST(3);
d = ROTATE_LEFT(d, 30);
W(0) = ROTATE_LEFT((W(13) ^ W(8) ^ W(2) ^ W(0)), 1); /* 64 */
a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(0) + SHA1_CONST(3);
c = ROTATE_LEFT(c, 30);
W(1) = ROTATE_LEFT((W(14) ^ W(9) ^ W(3) ^ W(1)), 1); /* 65 */
e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(1) + SHA1_CONST(3);
b = ROTATE_LEFT(b, 30);
W(2) = ROTATE_LEFT((W(15) ^ W(10) ^ W(4) ^ W(2)), 1); /* 66 */
d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(2) + SHA1_CONST(3);
a = ROTATE_LEFT(a, 30);
W(3) = ROTATE_LEFT((W(0) ^ W(11) ^ W(5) ^ W(3)), 1); /* 67 */
c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(3) + SHA1_CONST(3);
e = ROTATE_LEFT(e, 30);
W(4) = ROTATE_LEFT((W(1) ^ W(12) ^ W(6) ^ W(4)), 1); /* 68 */
b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(4) + SHA1_CONST(3);
d = ROTATE_LEFT(d, 30);
W(5) = ROTATE_LEFT((W(2) ^ W(13) ^ W(7) ^ W(5)), 1); /* 69 */
a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(5) + SHA1_CONST(3);
c = ROTATE_LEFT(c, 30);
W(6) = ROTATE_LEFT((W(3) ^ W(14) ^ W(8) ^ W(6)), 1); /* 70 */
e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(6) + SHA1_CONST(3);
b = ROTATE_LEFT(b, 30);
W(7) = ROTATE_LEFT((W(4) ^ W(15) ^ W(9) ^ W(7)), 1); /* 71 */
d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(7) + SHA1_CONST(3);
a = ROTATE_LEFT(a, 30);
W(8) = ROTATE_LEFT((W(5) ^ W(0) ^ W(10) ^ W(8)), 1); /* 72 */
c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(8) + SHA1_CONST(3);
e = ROTATE_LEFT(e, 30);
W(9) = ROTATE_LEFT((W(6) ^ W(1) ^ W(11) ^ W(9)), 1); /* 73 */
b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(9) + SHA1_CONST(3);
d = ROTATE_LEFT(d, 30);
W(10) = ROTATE_LEFT((W(7) ^ W(2) ^ W(12) ^ W(10)), 1); /* 74 */
a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(10) + SHA1_CONST(3);
c = ROTATE_LEFT(c, 30);
W(11) = ROTATE_LEFT((W(8) ^ W(3) ^ W(13) ^ W(11)), 1); /* 75 */
e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(11) + SHA1_CONST(3);
b = ROTATE_LEFT(b, 30);
W(12) = ROTATE_LEFT((W(9) ^ W(4) ^ W(14) ^ W(12)), 1); /* 76 */
d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(12) + SHA1_CONST(3);
a = ROTATE_LEFT(a, 30);
W(13) = ROTATE_LEFT((W(10) ^ W(5) ^ W(15) ^ W(13)), 1); /* 77 */
c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(13) + SHA1_CONST(3);
e = ROTATE_LEFT(e, 30);
W(14) = ROTATE_LEFT((W(11) ^ W(6) ^ W(0) ^ W(14)), 1); /* 78 */
b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(14) + SHA1_CONST(3);
d = ROTATE_LEFT(d, 30);
W(15) = ROTATE_LEFT((W(12) ^ W(7) ^ W(1) ^ W(15)), 1); /* 79 */
/*
 * Round 79 is fused with the final state update: the last round's
 * output is folded straight back into the chaining variables.
 */
ctx->state[0] += ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(15) +
SHA1_CONST(3);
ctx->state[1] += b;
ctx->state[2] += ROTATE_LEFT(c, 30);
ctx->state[3] += d;
ctx->state[4] += e;
/* zeroize sensitive information */
W(0) = W(1) = W(2) = W(3) = W(4) = W(5) = W(6) = W(7) = W(8) = 0;
W(9) = W(10) = W(11) = W(12) = W(13) = W(14) = W(15) = 0;
}
#endif /* !__amd64 */
/*
 * Encode()
 *
 * purpose: serialize an array of host-order 32-bit words into a
 *          big-endian byte stream, as SHA-1 requires for both the
 *          final digest and the appended message length.
 *   input: uint8_t *	: destination byte buffer
 *          uint32_t *	: source words to serialize
 *          size_t	: number of OUTPUT bytes to produce (multiple of 4)
 *  output: void
 */
static void
Encode(uint8_t *_RESTRICT_KYWD output, const uint32_t *_RESTRICT_KYWD input,
    size_t len)
{
	size_t off;

	/* Emit each word most-significant byte first. */
	for (off = 0; off < len; off += 4) {
		uint32_t word = input[off / 4];

		output[off] = (word >> 24) & 0xff;
		output[off + 1] = (word >> 16) & 0xff;
		output[off + 2] = (word >> 8) & 0xff;
		output[off + 3] = word & 0xff;
	}
}

495
module/icp/algs/sha2/sha2.c Normal file
View File

@ -0,0 +1,495 @@
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright 2013 Saso Kiselkov. All rights reserved.
*/
/*
* The basic framework for this code came from the reference
* implementation for MD5. That implementation is Copyright (C)
* 1991-2, RSA Data Security, Inc. Created 1991. All rights reserved.
*
* License to copy and use this software is granted provided that it
* is identified as the "RSA Data Security, Inc. MD5 Message-Digest
* Algorithm" in all material mentioning or referencing this software
* or this function.
*
* License is also granted to make and use derivative works provided
* that such works are identified as "derived from the RSA Data
* Security, Inc. MD5 Message-Digest Algorithm" in all material
* mentioning or referencing the derived work.
*
* RSA Data Security, Inc. makes no representations concerning either
* the merchantability of this software or the suitability of this
* software for any particular purpose. It is provided "as is"
* without express or implied warranty of any kind.
*
* These notices must be retained in any copies of any part of this
* documentation and/or software.
*
* NOTE: Cleaned-up and optimized, version of SHA2, based on the FIPS 180-2
* standard, available at
* http://csrc.nist.gov/publications/fips/fips180-2/fips180-2.pdf
* Not as fast as one would like -- further optimizations are encouraged
* and appreciated.
*/
#include <sys/zfs_context.h>
#define _SHA2_IMPL
#include <sha2/sha2.h>
#include <sha2/sha2_consts.h>
#define _RESTRICT_KYWD
#ifdef _LITTLE_ENDIAN
#include <sys/byteorder.h>
#define HAVE_HTONL
#endif
static void Encode(uint8_t *, uint32_t *, size_t);
#if defined(__amd64)
#define SHA256Transform(ctx, in) SHA256TransformBlocks((ctx), (in), 1)
void SHA256TransformBlocks(SHA2_CTX *ctx, const void *in, size_t num);
#else
static void SHA256Transform(SHA2_CTX *, const uint8_t *);
#endif /* __amd64 */
static uint8_t PADDING[128] = { 0x80, /* all zeros */ };
/* Ch and Maj are the basic SHA2 functions. */
#define Ch(b, c, d) (((b) & (c)) ^ ((~b) & (d)))
#define Maj(b, c, d) (((b) & (c)) ^ ((b) & (d)) ^ ((c) & (d)))
/* Rotates x right n bits. */
#define ROTR(x, n) \
(((x) >> (n)) | ((x) << ((sizeof (x) * NBBY)-(n))))
/* Shift x right n bits */
#define SHR(x, n) ((x) >> (n))
/* SHA256 Functions */
#define BIGSIGMA0_256(x) (ROTR((x), 2) ^ ROTR((x), 13) ^ ROTR((x), 22))
#define BIGSIGMA1_256(x) (ROTR((x), 6) ^ ROTR((x), 11) ^ ROTR((x), 25))
#define SIGMA0_256(x) (ROTR((x), 7) ^ ROTR((x), 18) ^ SHR((x), 3))
#define SIGMA1_256(x) (ROTR((x), 17) ^ ROTR((x), 19) ^ SHR((x), 10))
#define SHA256ROUND(a, b, c, d, e, f, g, h, i, w) \
T1 = h + BIGSIGMA1_256(e) + Ch(e, f, g) + SHA256_CONST(i) + w; \
d += T1; \
T2 = BIGSIGMA0_256(a) + Maj(a, b, c); \
h = T1 + T2
/*
* sparc optimization:
*
* on the sparc, we can load big endian 32-bit data easily. note that
* special care must be taken to ensure the address is 32-bit aligned.
* in the interest of speed, we don't check to make sure, since
* careful programming can guarantee this for us.
*/
#if defined(_BIG_ENDIAN)
#define LOAD_BIG_32(addr) (*(uint32_t *)(addr))
#define LOAD_BIG_64(addr) (*(uint64_t *)(addr))
#elif defined(HAVE_HTONL)
#define LOAD_BIG_32(addr) htonl(*((uint32_t *)(addr)))
#define LOAD_BIG_64(addr) htonll(*((uint64_t *)(addr)))
#else
/* little endian -- will work on big endian, but slowly */
#define LOAD_BIG_32(addr) \
(((addr)[0] << 24) | ((addr)[1] << 16) | ((addr)[2] << 8) | (addr)[3])
#define LOAD_BIG_64(addr) \
(((uint64_t)(addr)[0] << 56) | ((uint64_t)(addr)[1] << 48) | \
((uint64_t)(addr)[2] << 40) | ((uint64_t)(addr)[3] << 32) | \
((uint64_t)(addr)[4] << 24) | ((uint64_t)(addr)[5] << 16) | \
((uint64_t)(addr)[6] << 8) | (uint64_t)(addr)[7])
#endif /* _BIG_ENDIAN */
#if !defined(__amd64)
/* SHA256 Transform */
static void
SHA256Transform(SHA2_CTX *ctx, const uint8_t *blk)
{
uint32_t a = ctx->state.s32[0];
uint32_t b = ctx->state.s32[1];
uint32_t c = ctx->state.s32[2];
uint32_t d = ctx->state.s32[3];
uint32_t e = ctx->state.s32[4];
uint32_t f = ctx->state.s32[5];
uint32_t g = ctx->state.s32[6];
uint32_t h = ctx->state.s32[7];
uint32_t w0, w1, w2, w3, w4, w5, w6, w7;
uint32_t w8, w9, w10, w11, w12, w13, w14, w15;
uint32_t T1, T2;
if ((uintptr_t)blk & 0x3) { /* not 4-byte aligned? */
bcopy(blk, ctx->buf_un.buf32, sizeof (ctx->buf_un.buf32));
blk = (uint8_t *)ctx->buf_un.buf32;
}
/* LINTED E_BAD_PTR_CAST_ALIGN */
w0 = LOAD_BIG_32(blk + 4 * 0);
SHA256ROUND(a, b, c, d, e, f, g, h, 0, w0);
/* LINTED E_BAD_PTR_CAST_ALIGN */
w1 = LOAD_BIG_32(blk + 4 * 1);
SHA256ROUND(h, a, b, c, d, e, f, g, 1, w1);
/* LINTED E_BAD_PTR_CAST_ALIGN */
w2 = LOAD_BIG_32(blk + 4 * 2);
SHA256ROUND(g, h, a, b, c, d, e, f, 2, w2);
/* LINTED E_BAD_PTR_CAST_ALIGN */
w3 = LOAD_BIG_32(blk + 4 * 3);
SHA256ROUND(f, g, h, a, b, c, d, e, 3, w3);
/* LINTED E_BAD_PTR_CAST_ALIGN */
w4 = LOAD_BIG_32(blk + 4 * 4);
SHA256ROUND(e, f, g, h, a, b, c, d, 4, w4);
/* LINTED E_BAD_PTR_CAST_ALIGN */
w5 = LOAD_BIG_32(blk + 4 * 5);
SHA256ROUND(d, e, f, g, h, a, b, c, 5, w5);
/* LINTED E_BAD_PTR_CAST_ALIGN */
w6 = LOAD_BIG_32(blk + 4 * 6);
SHA256ROUND(c, d, e, f, g, h, a, b, 6, w6);
/* LINTED E_BAD_PTR_CAST_ALIGN */
w7 = LOAD_BIG_32(blk + 4 * 7);
SHA256ROUND(b, c, d, e, f, g, h, a, 7, w7);
/* LINTED E_BAD_PTR_CAST_ALIGN */
w8 = LOAD_BIG_32(blk + 4 * 8);
SHA256ROUND(a, b, c, d, e, f, g, h, 8, w8);
/* LINTED E_BAD_PTR_CAST_ALIGN */
w9 = LOAD_BIG_32(blk + 4 * 9);
SHA256ROUND(h, a, b, c, d, e, f, g, 9, w9);
/* LINTED E_BAD_PTR_CAST_ALIGN */
w10 = LOAD_BIG_32(blk + 4 * 10);
SHA256ROUND(g, h, a, b, c, d, e, f, 10, w10);
/* LINTED E_BAD_PTR_CAST_ALIGN */
w11 = LOAD_BIG_32(blk + 4 * 11);
SHA256ROUND(f, g, h, a, b, c, d, e, 11, w11);
/* LINTED E_BAD_PTR_CAST_ALIGN */
w12 = LOAD_BIG_32(blk + 4 * 12);
SHA256ROUND(e, f, g, h, a, b, c, d, 12, w12);
/* LINTED E_BAD_PTR_CAST_ALIGN */
w13 = LOAD_BIG_32(blk + 4 * 13);
SHA256ROUND(d, e, f, g, h, a, b, c, 13, w13);
/* LINTED E_BAD_PTR_CAST_ALIGN */
w14 = LOAD_BIG_32(blk + 4 * 14);
SHA256ROUND(c, d, e, f, g, h, a, b, 14, w14);
/* LINTED E_BAD_PTR_CAST_ALIGN */
w15 = LOAD_BIG_32(blk + 4 * 15);
SHA256ROUND(b, c, d, e, f, g, h, a, 15, w15);
w0 = SIGMA1_256(w14) + w9 + SIGMA0_256(w1) + w0;
SHA256ROUND(a, b, c, d, e, f, g, h, 16, w0);
w1 = SIGMA1_256(w15) + w10 + SIGMA0_256(w2) + w1;
SHA256ROUND(h, a, b, c, d, e, f, g, 17, w1);
w2 = SIGMA1_256(w0) + w11 + SIGMA0_256(w3) + w2;
SHA256ROUND(g, h, a, b, c, d, e, f, 18, w2);
w3 = SIGMA1_256(w1) + w12 + SIGMA0_256(w4) + w3;
SHA256ROUND(f, g, h, a, b, c, d, e, 19, w3);
w4 = SIGMA1_256(w2) + w13 + SIGMA0_256(w5) + w4;
SHA256ROUND(e, f, g, h, a, b, c, d, 20, w4);
w5 = SIGMA1_256(w3) + w14 + SIGMA0_256(w6) + w5;
SHA256ROUND(d, e, f, g, h, a, b, c, 21, w5);
w6 = SIGMA1_256(w4) + w15 + SIGMA0_256(w7) + w6;
SHA256ROUND(c, d, e, f, g, h, a, b, 22, w6);
w7 = SIGMA1_256(w5) + w0 + SIGMA0_256(w8) + w7;
SHA256ROUND(b, c, d, e, f, g, h, a, 23, w7);
w8 = SIGMA1_256(w6) + w1 + SIGMA0_256(w9) + w8;
SHA256ROUND(a, b, c, d, e, f, g, h, 24, w8);
w9 = SIGMA1_256(w7) + w2 + SIGMA0_256(w10) + w9;
SHA256ROUND(h, a, b, c, d, e, f, g, 25, w9);
w10 = SIGMA1_256(w8) + w3 + SIGMA0_256(w11) + w10;
SHA256ROUND(g, h, a, b, c, d, e, f, 26, w10);
w11 = SIGMA1_256(w9) + w4 + SIGMA0_256(w12) + w11;
SHA256ROUND(f, g, h, a, b, c, d, e, 27, w11);
w12 = SIGMA1_256(w10) + w5 + SIGMA0_256(w13) + w12;
SHA256ROUND(e, f, g, h, a, b, c, d, 28, w12);
w13 = SIGMA1_256(w11) + w6 + SIGMA0_256(w14) + w13;
SHA256ROUND(d, e, f, g, h, a, b, c, 29, w13);
w14 = SIGMA1_256(w12) + w7 + SIGMA0_256(w15) + w14;
SHA256ROUND(c, d, e, f, g, h, a, b, 30, w14);
w15 = SIGMA1_256(w13) + w8 + SIGMA0_256(w0) + w15;
SHA256ROUND(b, c, d, e, f, g, h, a, 31, w15);
w0 = SIGMA1_256(w14) + w9 + SIGMA0_256(w1) + w0;
SHA256ROUND(a, b, c, d, e, f, g, h, 32, w0);
w1 = SIGMA1_256(w15) + w10 + SIGMA0_256(w2) + w1;
SHA256ROUND(h, a, b, c, d, e, f, g, 33, w1);
w2 = SIGMA1_256(w0) + w11 + SIGMA0_256(w3) + w2;
SHA256ROUND(g, h, a, b, c, d, e, f, 34, w2);
w3 = SIGMA1_256(w1) + w12 + SIGMA0_256(w4) + w3;
SHA256ROUND(f, g, h, a, b, c, d, e, 35, w3);
w4 = SIGMA1_256(w2) + w13 + SIGMA0_256(w5) + w4;
SHA256ROUND(e, f, g, h, a, b, c, d, 36, w4);
w5 = SIGMA1_256(w3) + w14 + SIGMA0_256(w6) + w5;
SHA256ROUND(d, e, f, g, h, a, b, c, 37, w5);
w6 = SIGMA1_256(w4) + w15 + SIGMA0_256(w7) + w6;
SHA256ROUND(c, d, e, f, g, h, a, b, 38, w6);
w7 = SIGMA1_256(w5) + w0 + SIGMA0_256(w8) + w7;
SHA256ROUND(b, c, d, e, f, g, h, a, 39, w7);
w8 = SIGMA1_256(w6) + w1 + SIGMA0_256(w9) + w8;
SHA256ROUND(a, b, c, d, e, f, g, h, 40, w8);
w9 = SIGMA1_256(w7) + w2 + SIGMA0_256(w10) + w9;
SHA256ROUND(h, a, b, c, d, e, f, g, 41, w9);
w10 = SIGMA1_256(w8) + w3 + SIGMA0_256(w11) + w10;
SHA256ROUND(g, h, a, b, c, d, e, f, 42, w10);
w11 = SIGMA1_256(w9) + w4 + SIGMA0_256(w12) + w11;
SHA256ROUND(f, g, h, a, b, c, d, e, 43, w11);
w12 = SIGMA1_256(w10) + w5 + SIGMA0_256(w13) + w12;
SHA256ROUND(e, f, g, h, a, b, c, d, 44, w12);
w13 = SIGMA1_256(w11) + w6 + SIGMA0_256(w14) + w13;
SHA256ROUND(d, e, f, g, h, a, b, c, 45, w13);
w14 = SIGMA1_256(w12) + w7 + SIGMA0_256(w15) + w14;
SHA256ROUND(c, d, e, f, g, h, a, b, 46, w14);
w15 = SIGMA1_256(w13) + w8 + SIGMA0_256(w0) + w15;
SHA256ROUND(b, c, d, e, f, g, h, a, 47, w15);
w0 = SIGMA1_256(w14) + w9 + SIGMA0_256(w1) + w0;
SHA256ROUND(a, b, c, d, e, f, g, h, 48, w0);
w1 = SIGMA1_256(w15) + w10 + SIGMA0_256(w2) + w1;
SHA256ROUND(h, a, b, c, d, e, f, g, 49, w1);
w2 = SIGMA1_256(w0) + w11 + SIGMA0_256(w3) + w2;
SHA256ROUND(g, h, a, b, c, d, e, f, 50, w2);
w3 = SIGMA1_256(w1) + w12 + SIGMA0_256(w4) + w3;
SHA256ROUND(f, g, h, a, b, c, d, e, 51, w3);
w4 = SIGMA1_256(w2) + w13 + SIGMA0_256(w5) + w4;
SHA256ROUND(e, f, g, h, a, b, c, d, 52, w4);
w5 = SIGMA1_256(w3) + w14 + SIGMA0_256(w6) + w5;
SHA256ROUND(d, e, f, g, h, a, b, c, 53, w5);
w6 = SIGMA1_256(w4) + w15 + SIGMA0_256(w7) + w6;
SHA256ROUND(c, d, e, f, g, h, a, b, 54, w6);
w7 = SIGMA1_256(w5) + w0 + SIGMA0_256(w8) + w7;
SHA256ROUND(b, c, d, e, f, g, h, a, 55, w7);
w8 = SIGMA1_256(w6) + w1 + SIGMA0_256(w9) + w8;
SHA256ROUND(a, b, c, d, e, f, g, h, 56, w8);
w9 = SIGMA1_256(w7) + w2 + SIGMA0_256(w10) + w9;
SHA256ROUND(h, a, b, c, d, e, f, g, 57, w9);
w10 = SIGMA1_256(w8) + w3 + SIGMA0_256(w11) + w10;
SHA256ROUND(g, h, a, b, c, d, e, f, 58, w10);
w11 = SIGMA1_256(w9) + w4 + SIGMA0_256(w12) + w11;
SHA256ROUND(f, g, h, a, b, c, d, e, 59, w11);
w12 = SIGMA1_256(w10) + w5 + SIGMA0_256(w13) + w12;
SHA256ROUND(e, f, g, h, a, b, c, d, 60, w12);
w13 = SIGMA1_256(w11) + w6 + SIGMA0_256(w14) + w13;
SHA256ROUND(d, e, f, g, h, a, b, c, 61, w13);
w14 = SIGMA1_256(w12) + w7 + SIGMA0_256(w15) + w14;
SHA256ROUND(c, d, e, f, g, h, a, b, 62, w14);
w15 = SIGMA1_256(w13) + w8 + SIGMA0_256(w0) + w15;
SHA256ROUND(b, c, d, e, f, g, h, a, 63, w15);
ctx->state.s32[0] += a;
ctx->state.s32[1] += b;
ctx->state.s32[2] += c;
ctx->state.s32[3] += d;
ctx->state.s32[4] += e;
ctx->state.s32[5] += f;
ctx->state.s32[6] += g;
ctx->state.s32[7] += h;
}
#endif /* !__amd64 */
/*
 * Encode()
 *
 * purpose: serialize an array of host-order 32-bit words into a
 *          big-endian byte stream, as SHA-256 requires for both the
 *          final digest and the appended message length.
 *   input: uint8_t *	: destination byte buffer
 *          uint32_t *	: source words to serialize
 *          size_t	: number of OUTPUT bytes to produce (multiple of 4)
 *  output: void
 */
static void
Encode(uint8_t *_RESTRICT_KYWD output, uint32_t *_RESTRICT_KYWD input,
    size_t len)
{
	size_t off;

	/* Emit each word most-significant byte first. */
	for (off = 0; off < len; off += 4) {
		uint32_t word = input[off / 4];

		output[off] = (word >> 24) & 0xff;
		output[off + 1] = (word >> 16) & 0xff;
		output[off + 2] = (word >> 8) & 0xff;
		output[off + 3] = word & 0xff;
	}
}
/*
 * SHA2Init()
 *
 * purpose: initialize a SHA2 context for the given mechanism. This
 *          trimmed port only supports the SHA-256 family; any other
 *          mechanism triggers a panic.
 *   input: uint64_t	: the mechanism type (SHA256*_MECH_INFO_TYPE)
 *          SHA2_CTX *	: the context to initialize
 *  output: void
 */
void
SHA2Init(uint64_t mech, SHA2_CTX *ctx)
{
	/* SHA-256 initial hash value, FIPS 180-2 section 5.3.2. */
	static const uint32_t sha256_initial[8] = {
		0x6a09e667U, 0xbb67ae85U, 0x3c6ef372U, 0xa54ff53aU,
		0x510e527fU, 0x9b05688cU, 0x1f83d9abU, 0x5be0cd19U
	};
	int i;

	switch (mech) {
	case SHA256_MECH_INFO_TYPE:
	case SHA256_HMAC_MECH_INFO_TYPE:
	case SHA256_HMAC_GEN_MECH_INFO_TYPE:
		for (i = 0; i < 8; i++)
			ctx->state.s32[i] = sha256_initial[i];
		break;
	default:
		cmn_err(CE_PANIC,
		    "sha2_init: failed to find a supported algorithm: 0x%x",
		    (uint32_t)mech);
	}

	ctx->algotype = (uint32_t)mech;
	ctx->count.c64[0] = ctx->count.c64[1] = 0;
}
/*
 * SHA256Init()
 *
 * purpose: convenience wrapper around SHA2Init() for a plain
 *          (non-HMAC) SHA-256 digest context.
 *   input: SHA256_CTX * : the context to initialize
 *  output: void
 */
void
SHA256Init(SHA256_CTX *ctx)
{
SHA2Init(SHA256, ctx);
}
/*
 * SHA2Update()
 *
 * purpose: continues an sha2 digest operation, using the message block
 *          to update the context.
 *   input: SHA2_CTX *	: the context to update
 *          void *	: the message block
 *          size_t	: the length of the message block, in bytes
 *  output: void
 *
 * NOTE: in this trimmed port SHA2Init() only accepts SHA-256
 * mechanisms, so the 128-byte-block (SHA-384/512) paths below are
 * currently unreachable; they are kept correct for when those
 * algorithms are ported.
 */
void
SHA2Update(SHA2_CTX *ctx, const void *inptr, size_t input_len)
{
	uint32_t i, buf_index, buf_len, buf_limit;
	const uint8_t *input = inptr;
	uint32_t algotype = ctx->algotype;
#if defined(__amd64)
	uint32_t block_count;
#endif	/* __amd64 */

	/* check for noop */
	if (input_len == 0)
		return;

	if (algotype <= SHA256_HMAC_GEN_MECH_INFO_TYPE) {
		buf_limit = 64;

		/* compute number of bytes mod 64 */
		buf_index = (ctx->count.c32[1] >> 3) & 0x3F;

		/*
		 * Update the 64-bit bit count kept in two 32-bit words:
		 * carry into the high word on low-word wraparound, then
		 * add the bits of input_len*8 that exceed 32 bits.
		 */
		if ((ctx->count.c32[1] += (input_len << 3)) < (input_len << 3))
			ctx->count.c32[0]++;

		ctx->count.c32[0] += (input_len >> 29);
	} else {
		buf_limit = 128;

		/* compute number of bytes mod 128 */
		buf_index = (ctx->count.c64[1] >> 3) & 0x7F;

		/* update number of bits (128-bit count in two c64 words) */
		if ((ctx->count.c64[1] += (input_len << 3)) < (input_len << 3))
			ctx->count.c64[0]++;

		/*
		 * The bits of input_len*8 that overflow the 64-bit low
		 * word are input_len >> 61, not >> 29 (that shift is
		 * only correct for the 32-bit counters used by SHA-256
		 * above). Cast keeps the shift well-defined when
		 * size_t is narrower than 64 bits.
		 */
		ctx->count.c64[0] += ((uint64_t)input_len >> 61);
	}

	buf_len = buf_limit - buf_index;

	/* transform as many times as possible */
	i = 0;
	if (input_len >= buf_len) {
		/*
		 * general optimization:
		 *
		 * only do initial bcopy() and SHA2Transform() if
		 * buf_index != 0. if buf_index == 0, we're just
		 * wasting our time doing the bcopy() since there
		 * wasn't any data left over from a previous call to
		 * SHA2Update().
		 */
		if (buf_index) {
			bcopy(input, &ctx->buf_un.buf8[buf_index], buf_len);
			if (algotype <= SHA256_HMAC_GEN_MECH_INFO_TYPE)
				SHA256Transform(ctx, ctx->buf_un.buf8);

			i = buf_len;
		}

#if !defined(__amd64)
		/* process each remaining full block individually */
		if (algotype <= SHA256_HMAC_GEN_MECH_INFO_TYPE) {
			for (; i + buf_limit - 1 < input_len; i += buf_limit) {
				SHA256Transform(ctx, &input[i]);
			}
		}
#else
		/* hand all remaining full blocks to the assembly routine */
		if (algotype <= SHA256_HMAC_GEN_MECH_INFO_TYPE) {
			block_count = (input_len - i) >> 6;
			if (block_count > 0) {
				SHA256TransformBlocks(ctx, &input[i],
				    block_count);
				i += block_count << 6;
			}
		}
#endif	/* !__amd64 */

		/*
		 * general optimization:
		 *
		 * if i and input_len are the same, return now instead
		 * of calling bcopy(), since the bcopy() in this case
		 * will be an expensive noop.
		 */
		if (input_len == i)
			return;

		buf_index = 0;
	}

	/* buffer remaining input */
	bcopy(&input[i], &ctx->buf_un.buf8[buf_index], input_len - i);
}
/*
 * SHA2Final()
 *
 * purpose: ends an sha2 digest operation, finalizing the message digest
 *          and zeroing the context.
 *   input: uchar_t *	: a buffer to store the digest
 *          	: The function actually uses void* because many
 *          	: callers pass things other than uchar_t here.
 *          SHA2_CTX *	: the context to finalize, save, and zero
 *  output: void
 */
void
SHA2Final(void *digest, SHA2_CTX *ctx)
{
	uint8_t bitcount_be[sizeof (ctx->count.c32)];
	uint32_t algotype = ctx->algotype;

	/* Only the SHA-256 family is supported by this port. */
	if (algotype <= SHA256_HMAC_GEN_MECH_INFO_TYPE) {
		uint32_t pad_index = (ctx->count.c32[1] >> 3) & 0x3f;

		/* Save the bit count before padding mutates it. */
		Encode(bitcount_be, ctx->count.c32, sizeof (bitcount_be));

		/* Pad to 56 mod 64, append the length, emit the digest. */
		SHA2Update(ctx, PADDING, ((pad_index < 56) ? 56 : 120) -
		    pad_index);
		SHA2Update(ctx, bitcount_be, sizeof (bitcount_be));
		Encode(digest, ctx->state.s32, sizeof (ctx->state.s32));
	}

	/* zeroize sensitive information */
	bzero(ctx, sizeof (*ctx));
}

935
module/icp/api/kcf_cipher.c Normal file
View File

@ -0,0 +1,935 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <sys/zfs_context.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/api.h>
#include <sys/crypto/spi.h>
#include <sys/crypto/sched_impl.h>
#define CRYPTO_OPS_OFFSET(f) offsetof(crypto_ops_t, co_##f)
#define CRYPTO_CIPHER_OFFSET(f) offsetof(crypto_cipher_ops_t, f)
/*
* Encryption and decryption routines.
*/
/*
* The following are the possible returned values common to all the routines
* below. The applicability of some of these return values depends on the
* presence of the arguments.
*
* CRYPTO_SUCCESS: The operation completed successfully.
* CRYPTO_QUEUED: A request was submitted successfully. The callback
* routine will be called when the operation is done.
* CRYPTO_INVALID_MECH_NUMBER, CRYPTO_INVALID_MECH_PARAM, or
* CRYPTO_INVALID_MECH for problems with the 'mech'.
* CRYPTO_INVALID_DATA for bogus 'data'
* CRYPTO_HOST_MEMORY for failure to allocate memory to handle this work.
* CRYPTO_INVALID_CONTEXT: Not a valid context.
* CRYPTO_BUSY: Cannot process the request now. Schedule a
* crypto_bufcall(), or try later.
* CRYPTO_NOT_SUPPORTED and CRYPTO_MECH_NOT_SUPPORTED: No provider is
* capable of a function or a mechanism.
* CRYPTO_INVALID_KEY: bogus 'key' argument.
* CRYPTO_INVALID_PLAINTEXT: bogus 'plaintext' argument.
* CRYPTO_INVALID_CIPHERTEXT: bogus 'ciphertext' argument.
*/
/*
* crypto_cipher_init_prov()
*
* Arguments:
*
* pd: provider descriptor
* sid: session id
* mech: crypto_mechanism_t pointer.
* mech_type is a valid value previously returned by
* crypto_mech2id();
* When the mech's parameter is not NULL, its definition depends
* on the standard definition of the mechanism.
* key: pointer to a crypto_key_t structure.
* tmpl: a crypto_ctx_template_t, opaque template of a context of an
* encryption or decryption with the 'mech' using 'key'.
* 'tmpl' is created by a previous call to
* crypto_create_ctx_template().
* ctxp: Pointer to a crypto_context_t.
* func: CRYPTO_FG_ENCRYPT or CRYPTO_FG_DECRYPT.
* cr: crypto_call_req_t calling conditions and call back info.
*
* Description:
* This is a common function invoked internally by both
* crypto_encrypt_init() and crypto_decrypt_init().
* Asynchronously submits a request for, or synchronously performs the
* initialization of an encryption or a decryption operation.
* When possible and applicable, will internally use the pre-expanded key
* schedule from the context template, tmpl.
* When complete and successful, 'ctxp' will contain a crypto_context_t
* valid for later calls to encrypt_update() and encrypt_final(), or
* decrypt_update() and decrypt_final().
* The caller should hold a reference on the specified provider
* descriptor before calling this function.
*
* Context:
* Process or interrupt, according to the semantics dictated by the 'cr'.
*
* Returns:
* See comment in the beginning of the file.
*/
static int
crypto_cipher_init_prov(crypto_provider_t provider, crypto_session_id_t sid,
    crypto_mechanism_t *mech, crypto_key_t *key,
    crypto_spi_ctx_template_t tmpl, crypto_context_t *ctxp,
    crypto_call_req_t *crq, crypto_func_group_t func)
{
	int error;
	crypto_ctx_t *ctx;
	kcf_req_params_t params;
	kcf_provider_desc_t *pd = provider;
	kcf_provider_desc_t *real_provider = pd;

	/* Caller must already hold a reference on the provider descriptor. */
	ASSERT(KCF_PROV_REFHELD(pd));

	/*
	 * A logical provider is only a front end: route the request to a
	 * member hardware provider capable of the requested function.
	 * On success, 'real_provider' is returned held and must be
	 * released before we return.
	 */
	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
		if (func == CRYPTO_FG_ENCRYPT) {
			error = kcf_get_hardware_provider(mech->cm_type,
			    CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq), pd,
			    &real_provider, CRYPTO_FG_ENCRYPT);
		} else {
			error = kcf_get_hardware_provider(mech->cm_type,
			    CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq), pd,
			    &real_provider, CRYPTO_FG_DECRYPT);
		}
		if (error != CRYPTO_SUCCESS)
			return (error);
	}

	/* Allocate and initialize the canonical context */
	if ((ctx = kcf_new_ctx(crq, real_provider, sid)) == NULL) {
		/* Drop the hold taken by kcf_get_hardware_provider() above. */
		if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
			KCF_PROV_REFRELE(real_provider);
		return (CRYPTO_HOST_MEMORY);
	}

	/* The fast path for SW providers. */
	if (CHECK_FASTPATH(crq, pd)) {
		crypto_mechanism_t lmech;

		/*
		 * Call the SPI entry point directly with a local copy of
		 * the mechanism, translated to the provider's own
		 * mechanism-number space.
		 */
		lmech = *mech;
		KCF_SET_PROVIDER_MECHNUM(mech->cm_type, real_provider, &lmech);
		if (func == CRYPTO_FG_ENCRYPT)
			error = KCF_PROV_ENCRYPT_INIT(real_provider, ctx,
			    &lmech, key, tmpl, KCF_SWFP_RHNDL(crq));
		else {
			ASSERT(func == CRYPTO_FG_DECRYPT);
			error = KCF_PROV_DECRYPT_INIT(real_provider, ctx,
			    &lmech, key, tmpl, KCF_SWFP_RHNDL(crq));
		}
		KCF_PROV_INCRSTATS(pd, error);
		goto done;
	}

	/* Check if context sharing is possible */
	if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
	    key->ck_format == CRYPTO_KEY_RAW &&
	    KCF_CAN_SHARE_OPSTATE(pd, mech->cm_type)) {
		kcf_context_t *tctxp = (kcf_context_t *)ctx;
		kcf_provider_desc_t *tpd = NULL;
		crypto_mech_info_t *sinfo;

		if ((kcf_get_sw_prov(mech->cm_type, &tpd, &tctxp->kc_mech,
		    B_FALSE) == CRYPTO_SUCCESS)) {
			int tlen;

			sinfo = &(KCF_TO_PROV_MECHINFO(tpd, mech->cm_type));
			/*
			 * key->ck_length from the consumer is always in bits.
			 * We convert it to be in the same unit registered by
			 * the provider in order to do a comparison.
			 */
			if (sinfo->cm_mech_flags & CRYPTO_KEYSIZE_UNIT_IN_BYTES)
				tlen = key->ck_length >> 3;
			else
				tlen = key->ck_length;
			/*
			 * Check if the software provider can support context
			 * sharing and support this key length.
			 */
			if ((sinfo->cm_mech_flags & CRYPTO_CAN_SHARE_OPSTATE) &&
			    (tlen >= sinfo->cm_min_key_length) &&
			    (tlen <= sinfo->cm_max_key_length)) {
				ctx->cc_flags = CRYPTO_INIT_OPSTATE;
				tctxp->kc_sw_prov_desc = tpd;
			} else
				/* Not shareable: drop the SW provider hold. */
				KCF_PROV_REFRELE(tpd);
		}
	}

	if (func == CRYPTO_FG_ENCRYPT) {
		KCF_WRAP_ENCRYPT_OPS_PARAMS(&params, KCF_OP_INIT, sid,
		    mech, key, NULL, NULL, tmpl);
	} else {
		ASSERT(func == CRYPTO_FG_DECRYPT);
		KCF_WRAP_DECRYPT_OPS_PARAMS(&params, KCF_OP_INIT, sid,
		    mech, key, NULL, NULL, tmpl);
	}

	error = kcf_submit_request(real_provider, ctx, crq, &params,
	    B_FALSE);

	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
		KCF_PROV_REFRELE(real_provider);

done:
	if ((error == CRYPTO_SUCCESS) || (error == CRYPTO_QUEUED))
		*ctxp = (crypto_context_t)ctx;
	else {
		/* Release the hold done in kcf_new_ctx(). */
		KCF_CONTEXT_REFRELE((kcf_context_t *)ctx->cc_framework_private);
	}

	return (error);
}
/*
* Same as crypto_cipher_init_prov(), but relies on the scheduler to pick
* an appropriate provider. See crypto_cipher_init_prov() comments for more
* details.
*/
static int
crypto_cipher_init(crypto_mechanism_t *mech, crypto_key_t *key,
    crypto_ctx_template_t tmpl, crypto_context_t *ctxp,
    crypto_call_req_t *crq, crypto_func_group_t func)
{
	int error;
	kcf_mech_entry_t *me;
	kcf_provider_desc_t *pd;
	kcf_ctx_template_t *ctx_tmpl;
	crypto_spi_ctx_template_t spi_ctx_tmpl = NULL;
	/* Providers that already failed recoverably for this request. */
	kcf_prov_tried_t *list = NULL;

retry:
	/* pd is returned held */
	if ((pd = kcf_get_mech_provider(mech->cm_type, &me, &error,
	    list, func, CHECK_RESTRICT(crq), 0)) == NULL) {
		if (list != NULL)
			kcf_free_triedlist(list);
		return (error);
	}

	/*
	 * For SW providers, check the validity of the context template
	 * It is very rare that the generation number mis-matches, so
	 * is acceptable to fail here, and let the consumer recover by
	 * freeing this tmpl and create a new one for the key and new SW
	 * provider
	 */
	if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) &&
	    ((ctx_tmpl = (kcf_ctx_template_t *)tmpl) != NULL)) {
		if (ctx_tmpl->ct_generation != me->me_gen_swprov) {
			if (list != NULL)
				kcf_free_triedlist(list);
			KCF_PROV_REFRELE(pd);
			return (CRYPTO_OLD_CTX_TEMPLATE);
		} else {
			spi_ctx_tmpl = ctx_tmpl->ct_prov_tmpl;
		}
	}

	error = crypto_cipher_init_prov(pd, pd->pd_sid, mech, key,
	    spi_ctx_tmpl, ctxp, crq, func);
	if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED &&
	    IS_RECOVERABLE(error)) {
		/* Add pd to the linked list of providers tried. */
		if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL)
			goto retry;
	}

	if (list != NULL)
		kcf_free_triedlist(list);

	/* Drop the hold returned by kcf_get_mech_provider(). */
	KCF_PROV_REFRELE(pd);
	return (error);
}
/*
* crypto_encrypt_prov()
*
* Arguments:
* pd: provider descriptor
* sid: session id
* mech: crypto_mechanism_t pointer.
* mech_type is a valid value previously returned by
* crypto_mech2id();
* When the mech's parameter is not NULL, its definition depends
* on the standard definition of the mechanism.
* key: pointer to a crypto_key_t structure.
* plaintext: The message to be encrypted
* ciphertext: Storage for the encrypted message. The length needed
* depends on the mechanism, and the plaintext's size.
* tmpl: a crypto_ctx_template_t, opaque template of a context of an
* encryption with the 'mech' using 'key'. 'tmpl' is created by
* a previous call to crypto_create_ctx_template().
* cr: crypto_call_req_t calling conditions and call back info.
*
* Description:
* Asynchronously submits a request for, or synchronously performs a
* single-part encryption of 'plaintext' with the mechanism 'mech', using
* the key 'key'.
* When complete and successful, 'ciphertext' will contain the encrypted
* message.
*
* Context:
* Process or interrupt, according to the semantics dictated by the 'cr'.
*
* Returns:
* See comment in the beginning of the file.
*/
int
crypto_encrypt_prov(crypto_provider_t provider, crypto_session_id_t sid,
    crypto_mechanism_t *mech, crypto_data_t *plaintext, crypto_key_t *key,
    crypto_ctx_template_t tmpl, crypto_data_t *ciphertext,
    crypto_call_req_t *crq)
{
	kcf_provider_desc_t *pd = provider;
	kcf_provider_desc_t *real_provider = pd;
	boolean_t is_logical = (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER);
	kcf_req_params_t params;
	int rv;

	/* Caller must already hold a reference on the provider descriptor. */
	ASSERT(KCF_PROV_REFHELD(pd));

	/*
	 * A logical provider only fronts for its members; pick a hardware
	 * member capable of atomic encryption. On success 'real_provider'
	 * comes back held.
	 */
	if (is_logical) {
		rv = kcf_get_hardware_provider(mech->cm_type,
		    CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq), pd,
		    &real_provider, CRYPTO_FG_ENCRYPT_ATOMIC);
		if (rv != CRYPTO_SUCCESS)
			return (rv);
	}

	/* Single-part (atomic) encryption: no context survives the call. */
	KCF_WRAP_ENCRYPT_OPS_PARAMS(&params, KCF_OP_ATOMIC, sid, mech, key,
	    plaintext, ciphertext, tmpl);
	rv = kcf_submit_request(real_provider, NULL, crq, &params, B_FALSE);

	if (is_logical)
		KCF_PROV_REFRELE(real_provider);

	return (rv);
}
/*
* Same as crypto_encrypt_prov(), but relies on the scheduler to pick
* a provider. See crypto_encrypt_prov() for more details.
*/
int
crypto_encrypt(crypto_mechanism_t *mech, crypto_data_t *plaintext,
    crypto_key_t *key, crypto_ctx_template_t tmpl, crypto_data_t *ciphertext,
    crypto_call_req_t *crq)
{
	int error;
	kcf_mech_entry_t *me;
	kcf_req_params_t params;
	kcf_provider_desc_t *pd;
	kcf_ctx_template_t *ctx_tmpl;
	crypto_spi_ctx_template_t spi_ctx_tmpl = NULL;
	/* Providers that already failed recoverably for this request. */
	kcf_prov_tried_t *list = NULL;

retry:
	/* pd is returned held */
	if ((pd = kcf_get_mech_provider(mech->cm_type, &me, &error,
	    list, CRYPTO_FG_ENCRYPT_ATOMIC, CHECK_RESTRICT(crq),
	    plaintext->cd_length)) == NULL) {
		if (list != NULL)
			kcf_free_triedlist(list);
		return (error);
	}

	/*
	 * For SW providers, check the validity of the context template
	 * It is very rare that the generation number mis-matches, so
	 * is acceptable to fail here, and let the consumer recover by
	 * freeing this tmpl and create a new one for the key and new SW
	 * provider
	 */
	if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) &&
	    ((ctx_tmpl = (kcf_ctx_template_t *)tmpl) != NULL)) {
		if (ctx_tmpl->ct_generation != me->me_gen_swprov) {
			if (list != NULL)
				kcf_free_triedlist(list);
			KCF_PROV_REFRELE(pd);
			return (CRYPTO_OLD_CTX_TEMPLATE);
		} else {
			spi_ctx_tmpl = ctx_tmpl->ct_prov_tmpl;
		}
	}

	/* The fast path for SW providers. */
	if (CHECK_FASTPATH(crq, pd)) {
		crypto_mechanism_t lmech;

		/* Call the SPI directly with a provider-local mech copy. */
		lmech = *mech;
		KCF_SET_PROVIDER_MECHNUM(mech->cm_type, pd, &lmech);
		error = KCF_PROV_ENCRYPT_ATOMIC(pd, pd->pd_sid, &lmech, key,
		    plaintext, ciphertext, spi_ctx_tmpl, KCF_SWFP_RHNDL(crq));
		KCF_PROV_INCRSTATS(pd, error);
	} else {
		KCF_WRAP_ENCRYPT_OPS_PARAMS(&params, KCF_OP_ATOMIC, pd->pd_sid,
		    mech, key, plaintext, ciphertext, spi_ctx_tmpl);
		error = kcf_submit_request(pd, NULL, crq, &params, B_FALSE);
	}

	if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED &&
	    IS_RECOVERABLE(error)) {
		/* Add pd to the linked list of providers tried. */
		if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL)
			goto retry;
	}

	if (list != NULL)
		kcf_free_triedlist(list);

	KCF_PROV_REFRELE(pd);
	return (error);
}
/*
* crypto_encrypt_init_prov()
*
* Calls crypto_cipher_init_prov() to initialize an encryption operation.
*/
int
crypto_encrypt_init_prov(crypto_provider_t pd, crypto_session_id_t sid,
    crypto_mechanism_t *mech, crypto_key_t *key,
    crypto_ctx_template_t tmpl, crypto_context_t *ctxp,
    crypto_call_req_t *crq)
{
	/* Encryption-specific entry into the common cipher-init path. */
	int rv;

	rv = crypto_cipher_init_prov(pd, sid, mech, key, tmpl, ctxp, crq,
	    CRYPTO_FG_ENCRYPT);
	return (rv);
}
/*
* crypto_encrypt_init()
*
* Calls crypto_cipher_init() to initialize an encryption operation
*/
int
crypto_encrypt_init(crypto_mechanism_t *mech, crypto_key_t *key,
    crypto_ctx_template_t tmpl, crypto_context_t *ctxp,
    crypto_call_req_t *crq)
{
	/* Scheduler-selected-provider variant of encrypt initialization. */
	int rv;

	rv = crypto_cipher_init(mech, key, tmpl, ctxp, crq,
	    CRYPTO_FG_ENCRYPT);
	return (rv);
}
/*
* crypto_encrypt_update()
*
* Arguments:
* context: A crypto_context_t initialized by encrypt_init().
* plaintext: The message part to be encrypted
* ciphertext: Storage for the encrypted message part.
* cr: crypto_call_req_t calling conditions and call back info.
*
* Description:
* Asynchronously submits a request for, or synchronously performs a
* part of an encryption operation.
*
* Context:
* Process or interrupt, according to the semantics dictated by the 'cr'.
*
* Returns:
* See comment in the beginning of the file.
*/
int
crypto_encrypt_update(crypto_context_t context, crypto_data_t *plaintext,
    crypto_data_t *ciphertext, crypto_call_req_t *cr)
{
	crypto_ctx_t *ctx = (crypto_ctx_t *)context;
	kcf_context_t *kcf_ctx;
	kcf_provider_desc_t *pd;
	int error;
	kcf_req_params_t params;

	/* Validate the context chain set up by the init step. */
	if ((ctx == NULL) ||
	    ((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) ||
	    ((pd = kcf_ctx->kc_prov_desc) == NULL)) {
		return (CRYPTO_INVALID_CONTEXT);
	}

	/* Init resolved any logical provider to a real one already. */
	ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);

	/* The fast path for SW providers. */
	if (CHECK_FASTPATH(cr, pd)) {
		error = KCF_PROV_ENCRYPT_UPDATE(pd, ctx, plaintext,
		    ciphertext, NULL);
		KCF_PROV_INCRSTATS(pd, error);
		return (error);
	}

	/* Check if we should use a software provider for small jobs */
	if ((ctx->cc_flags & CRYPTO_USE_OPSTATE) && cr == NULL) {
		if (plaintext->cd_length < kcf_ctx->kc_mech->me_threshold &&
		    kcf_ctx->kc_sw_prov_desc != NULL &&
		    KCF_IS_PROV_USABLE(kcf_ctx->kc_sw_prov_desc)) {
			pd = kcf_ctx->kc_sw_prov_desc;
		}
	}

	/*
	 * Note: the context hold is NOT released here; the operation
	 * continues with further update calls and a final call.
	 */
	KCF_WRAP_ENCRYPT_OPS_PARAMS(&params, KCF_OP_UPDATE,
	    ctx->cc_session, NULL, NULL, plaintext, ciphertext, NULL);
	error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);

	return (error);
}
/*
* crypto_encrypt_final()
*
* Arguments:
* context: A crypto_context_t initialized by encrypt_init().
* ciphertext: Storage for the last part of encrypted message
* cr: crypto_call_req_t calling conditions and call back info.
*
* Description:
* Asynchronously submits a request for, or synchronously performs the
* final part of an encryption operation.
*
* Context:
* Process or interrupt, according to the semantics dictated by the 'cr'.
*
* Returns:
* See comment in the beginning of the file.
*/
int
crypto_encrypt_final(crypto_context_t context, crypto_data_t *ciphertext,
    crypto_call_req_t *cr)
{
	crypto_ctx_t *ctx = (crypto_ctx_t *)context;
	kcf_context_t *kcf_ctx;
	kcf_provider_desc_t *pd;
	kcf_req_params_t params;
	int rv;

	/* Validate the context chain before dereferencing it. */
	if (ctx == NULL)
		return (CRYPTO_INVALID_CONTEXT);
	kcf_ctx = (kcf_context_t *)ctx->cc_framework_private;
	if (kcf_ctx == NULL)
		return (CRYPTO_INVALID_CONTEXT);
	pd = kcf_ctx->kc_prov_desc;
	if (pd == NULL)
		return (CRYPTO_INVALID_CONTEXT);

	ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);

	if (CHECK_FASTPATH(cr, pd)) {
		/* SW-provider shortcut: invoke the SPI entry directly. */
		rv = KCF_PROV_ENCRYPT_FINAL(pd, ctx, ciphertext, NULL);
		KCF_PROV_INCRSTATS(pd, rv);
	} else {
		KCF_WRAP_ENCRYPT_OPS_PARAMS(&params, KCF_OP_FINAL,
		    ctx->cc_session, NULL, NULL, NULL, ciphertext, NULL);
		rv = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
	}

	/* Release the hold done in kcf_new_ctx() during init step. */
	KCF_CONTEXT_COND_RELEASE(rv, kcf_ctx);
	return (rv);
}
/*
* crypto_decrypt_prov()
*
* Arguments:
* pd: provider descriptor
* sid: session id
* mech: crypto_mechanism_t pointer.
* mech_type is a valid value previously returned by
* crypto_mech2id();
* When the mech's parameter is not NULL, its definition depends
* on the standard definition of the mechanism.
* key: pointer to a crypto_key_t structure.
 * ciphertext: The message to be decrypted
 * plaintext: Storage for the decrypted message. The length needed
 * depends on the mechanism, and the ciphertext's size.
* tmpl: a crypto_ctx_template_t, opaque template of a context of an
* encryption with the 'mech' using 'key'. 'tmpl' is created by
* a previous call to crypto_create_ctx_template().
* cr: crypto_call_req_t calling conditions and call back info.
*
* Description:
* Asynchronously submits a request for, or synchronously performs a
* single-part decryption of 'ciphertext' with the mechanism 'mech', using
* the key 'key'.
* When complete and successful, 'plaintext' will contain the decrypted
* message.
*
* Context:
* Process or interrupt, according to the semantics dictated by the 'cr'.
*
* Returns:
* See comment in the beginning of the file.
*/
int
crypto_decrypt_prov(crypto_provider_t provider, crypto_session_id_t sid,
    crypto_mechanism_t *mech, crypto_data_t *ciphertext, crypto_key_t *key,
    crypto_ctx_template_t tmpl, crypto_data_t *plaintext,
    crypto_call_req_t *crq)
{
	kcf_provider_desc_t *pd = provider;
	kcf_provider_desc_t *real_provider = pd;
	boolean_t is_logical = (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER);
	kcf_req_params_t params;
	int rv;

	/* Caller must already hold a reference on the provider descriptor. */
	ASSERT(KCF_PROV_REFHELD(pd));

	/*
	 * A logical provider only fronts for its members; pick a hardware
	 * member capable of atomic decryption. On success 'real_provider'
	 * comes back held.
	 */
	if (is_logical) {
		rv = kcf_get_hardware_provider(mech->cm_type,
		    CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq), pd,
		    &real_provider, CRYPTO_FG_DECRYPT_ATOMIC);
		if (rv != CRYPTO_SUCCESS)
			return (rv);
	}

	/* Single-part (atomic) decryption: no context survives the call. */
	KCF_WRAP_DECRYPT_OPS_PARAMS(&params, KCF_OP_ATOMIC, sid, mech, key,
	    ciphertext, plaintext, tmpl);
	rv = kcf_submit_request(real_provider, NULL, crq, &params, B_FALSE);

	if (is_logical)
		KCF_PROV_REFRELE(real_provider);

	return (rv);
}
/*
* Same as crypto_decrypt_prov(), but relies on the KCF scheduler to
* choose a provider. See crypto_decrypt_prov() comments for more
* information.
*/
int
crypto_decrypt(crypto_mechanism_t *mech, crypto_data_t *ciphertext,
    crypto_key_t *key, crypto_ctx_template_t tmpl, crypto_data_t *plaintext,
    crypto_call_req_t *crq)
{
	int error;
	kcf_mech_entry_t *me;
	kcf_req_params_t params;
	kcf_provider_desc_t *pd;
	kcf_ctx_template_t *ctx_tmpl;
	crypto_spi_ctx_template_t spi_ctx_tmpl = NULL;
	/* Providers that already failed recoverably for this request. */
	kcf_prov_tried_t *list = NULL;

retry:
	/* pd is returned held */
	if ((pd = kcf_get_mech_provider(mech->cm_type, &me, &error,
	    list, CRYPTO_FG_DECRYPT_ATOMIC, CHECK_RESTRICT(crq),
	    ciphertext->cd_length)) == NULL) {
		if (list != NULL)
			kcf_free_triedlist(list);
		return (error);
	}

	/*
	 * For SW providers, check the validity of the context template
	 * It is very rare that the generation number mis-matches, so
	 * is acceptable to fail here, and let the consumer recover by
	 * freeing this tmpl and create a new one for the key and new SW
	 * provider
	 */
	if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) &&
	    ((ctx_tmpl = (kcf_ctx_template_t *)tmpl) != NULL)) {
		if (ctx_tmpl->ct_generation != me->me_gen_swprov) {
			if (list != NULL)
				kcf_free_triedlist(list);
			KCF_PROV_REFRELE(pd);
			return (CRYPTO_OLD_CTX_TEMPLATE);
		} else {
			spi_ctx_tmpl = ctx_tmpl->ct_prov_tmpl;
		}
	}

	/* The fast path for SW providers. */
	if (CHECK_FASTPATH(crq, pd)) {
		crypto_mechanism_t lmech;

		/* Call the SPI directly with a provider-local mech copy. */
		lmech = *mech;
		KCF_SET_PROVIDER_MECHNUM(mech->cm_type, pd, &lmech);
		error = KCF_PROV_DECRYPT_ATOMIC(pd, pd->pd_sid, &lmech, key,
		    ciphertext, plaintext, spi_ctx_tmpl, KCF_SWFP_RHNDL(crq));
		KCF_PROV_INCRSTATS(pd, error);
	} else {
		KCF_WRAP_DECRYPT_OPS_PARAMS(&params, KCF_OP_ATOMIC, pd->pd_sid,
		    mech, key, ciphertext, plaintext, spi_ctx_tmpl);
		error = kcf_submit_request(pd, NULL, crq, &params, B_FALSE);
	}

	if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED &&
	    IS_RECOVERABLE(error)) {
		/* Add pd to the linked list of providers tried. */
		if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL)
			goto retry;
	}

	if (list != NULL)
		kcf_free_triedlist(list);

	KCF_PROV_REFRELE(pd);
	return (error);
}
/*
* crypto_decrypt_init_prov()
*
* Calls crypto_cipher_init_prov() to initialize a decryption operation
*/
int
crypto_decrypt_init_prov(crypto_provider_t pd, crypto_session_id_t sid,
    crypto_mechanism_t *mech, crypto_key_t *key,
    crypto_ctx_template_t tmpl, crypto_context_t *ctxp,
    crypto_call_req_t *crq)
{
	/* Decryption-specific entry into the common cipher-init path. */
	int rv;

	rv = crypto_cipher_init_prov(pd, sid, mech, key, tmpl, ctxp, crq,
	    CRYPTO_FG_DECRYPT);
	return (rv);
}
/*
* crypto_decrypt_init()
*
* Calls crypto_cipher_init() to initialize a decryption operation
*/
int
crypto_decrypt_init(crypto_mechanism_t *mech, crypto_key_t *key,
    crypto_ctx_template_t tmpl, crypto_context_t *ctxp,
    crypto_call_req_t *crq)
{
	/* Scheduler-selected-provider variant of decrypt initialization. */
	int rv;

	rv = crypto_cipher_init(mech, key, tmpl, ctxp, crq,
	    CRYPTO_FG_DECRYPT);
	return (rv);
}
/*
* crypto_decrypt_update()
*
* Arguments:
* context: A crypto_context_t initialized by decrypt_init().
* ciphertext: The message part to be decrypted
* plaintext: Storage for the decrypted message part.
* cr: crypto_call_req_t calling conditions and call back info.
*
* Description:
* Asynchronously submits a request for, or synchronously performs a
 * part of a decryption operation.
*
* Context:
* Process or interrupt, according to the semantics dictated by the 'cr'.
*
* Returns:
* See comment in the beginning of the file.
*/
int
crypto_decrypt_update(crypto_context_t context, crypto_data_t *ciphertext,
    crypto_data_t *plaintext, crypto_call_req_t *cr)
{
	crypto_ctx_t *ctx = (crypto_ctx_t *)context;
	kcf_context_t *kcf_ctx;
	kcf_provider_desc_t *pd;
	int error;
	kcf_req_params_t params;

	/* Validate the context chain set up by the init step. */
	if ((ctx == NULL) ||
	    ((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) ||
	    ((pd = kcf_ctx->kc_prov_desc) == NULL)) {
		return (CRYPTO_INVALID_CONTEXT);
	}

	/* Init resolved any logical provider to a real one already. */
	ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);

	/* The fast path for SW providers. */
	if (CHECK_FASTPATH(cr, pd)) {
		error = KCF_PROV_DECRYPT_UPDATE(pd, ctx, ciphertext,
		    plaintext, NULL);
		KCF_PROV_INCRSTATS(pd, error);
		return (error);
	}

	/* Check if we should use a software provider for small jobs */
	if ((ctx->cc_flags & CRYPTO_USE_OPSTATE) && cr == NULL) {
		if (ciphertext->cd_length < kcf_ctx->kc_mech->me_threshold &&
		    kcf_ctx->kc_sw_prov_desc != NULL &&
		    KCF_IS_PROV_USABLE(kcf_ctx->kc_sw_prov_desc)) {
			pd = kcf_ctx->kc_sw_prov_desc;
		}
	}

	/*
	 * Note: the context hold is NOT released here; the operation
	 * continues with further update calls and a final call.
	 */
	KCF_WRAP_DECRYPT_OPS_PARAMS(&params, KCF_OP_UPDATE,
	    ctx->cc_session, NULL, NULL, ciphertext, plaintext, NULL);
	error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);

	return (error);
}
/*
* crypto_decrypt_final()
*
* Arguments:
* context: A crypto_context_t initialized by decrypt_init().
* plaintext: Storage for the last part of the decrypted message
* cr: crypto_call_req_t calling conditions and call back info.
*
* Description:
* Asynchronously submits a request for, or synchronously performs the
* final part of a decryption operation.
*
* Context:
* Process or interrupt, according to the semantics dictated by the 'cr'.
*
* Returns:
* See comment in the beginning of the file.
*/
int
crypto_decrypt_final(crypto_context_t context, crypto_data_t *plaintext,
    crypto_call_req_t *cr)
{
	crypto_ctx_t *ctx = (crypto_ctx_t *)context;
	kcf_context_t *kcf_ctx;
	kcf_provider_desc_t *pd;
	kcf_req_params_t params;
	int rv;

	/* Validate the context chain before dereferencing it. */
	if (ctx == NULL)
		return (CRYPTO_INVALID_CONTEXT);
	kcf_ctx = (kcf_context_t *)ctx->cc_framework_private;
	if (kcf_ctx == NULL)
		return (CRYPTO_INVALID_CONTEXT);
	pd = kcf_ctx->kc_prov_desc;
	if (pd == NULL)
		return (CRYPTO_INVALID_CONTEXT);

	ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);

	if (CHECK_FASTPATH(cr, pd)) {
		/* SW-provider shortcut: invoke the SPI entry directly. */
		rv = KCF_PROV_DECRYPT_FINAL(pd, ctx, plaintext,
		    NULL);
		KCF_PROV_INCRSTATS(pd, rv);
	} else {
		KCF_WRAP_DECRYPT_OPS_PARAMS(&params, KCF_OP_FINAL,
		    ctx->cc_session, NULL, NULL, NULL, plaintext, NULL);
		rv = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
	}

	/* Release the hold done in kcf_new_ctx() during init step. */
	KCF_CONTEXT_COND_RELEASE(rv, kcf_ctx);
	return (rv);
}
/*
* See comments for crypto_encrypt_update().
*/
int
crypto_encrypt_single(crypto_context_t context, crypto_data_t *plaintext,
    crypto_data_t *ciphertext, crypto_call_req_t *cr)
{
	crypto_ctx_t *ctx = (crypto_ctx_t *)context;
	kcf_context_t *kcf_ctx;
	kcf_provider_desc_t *pd;
	int error;
	kcf_req_params_t params;

	/* Validate the context chain set up by the init step. */
	if ((ctx == NULL) ||
	    ((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) ||
	    ((pd = kcf_ctx->kc_prov_desc) == NULL)) {
		return (CRYPTO_INVALID_CONTEXT);
	}

	/* The fast path for SW providers. */
	if (CHECK_FASTPATH(cr, pd)) {
		error = KCF_PROV_ENCRYPT(pd, ctx, plaintext,
		    ciphertext, NULL);
		KCF_PROV_INCRSTATS(pd, error);
	} else {
		KCF_WRAP_ENCRYPT_OPS_PARAMS(&params, KCF_OP_SINGLE, pd->pd_sid,
		    NULL, NULL, plaintext, ciphertext, NULL);
		error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
	}

	/*
	 * A single-part call completes the operation, so the context hold
	 * is dropped here.
	 */
	/* Release the hold done in kcf_new_ctx() during init step. */
	KCF_CONTEXT_COND_RELEASE(error, kcf_ctx);
	return (error);
}
/*
* See comments for crypto_decrypt_update().
*/
int
crypto_decrypt_single(crypto_context_t context, crypto_data_t *ciphertext,
    crypto_data_t *plaintext, crypto_call_req_t *cr)
{
	crypto_ctx_t *ctx = (crypto_ctx_t *)context;
	kcf_context_t *kcf_ctx;
	kcf_provider_desc_t *pd;
	int error;
	kcf_req_params_t params;

	/* Validate the context chain set up by the init step. */
	if ((ctx == NULL) ||
	    ((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) ||
	    ((pd = kcf_ctx->kc_prov_desc) == NULL)) {
		return (CRYPTO_INVALID_CONTEXT);
	}

	/* The fast path for SW providers. */
	if (CHECK_FASTPATH(cr, pd)) {
		error = KCF_PROV_DECRYPT(pd, ctx, ciphertext,
		    plaintext, NULL);
		KCF_PROV_INCRSTATS(pd, error);
	} else {
		KCF_WRAP_DECRYPT_OPS_PARAMS(&params, KCF_OP_SINGLE, pd->pd_sid,
		    NULL, NULL, ciphertext, plaintext, NULL);
		error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
	}

	/*
	 * A single-part call completes the operation, so the context hold
	 * is dropped here.
	 */
	/* Release the hold done in kcf_new_ctx() during init step. */
	KCF_CONTEXT_COND_RELEASE(error, kcf_ctx);
	return (error);
}
#if defined(_KERNEL) && defined(HAVE_SPL)
/*
 * Export the KCF cipher consumer API to other kernel modules.
 *
 * NOTE(review): crypto_cipher_init_prov() and crypto_cipher_init() are
 * declared 'static' above yet exported here — verify whether they should
 * be non-static (or the exports dropped).
 */
EXPORT_SYMBOL(crypto_cipher_init_prov);
EXPORT_SYMBOL(crypto_cipher_init);
EXPORT_SYMBOL(crypto_encrypt_prov);
EXPORT_SYMBOL(crypto_encrypt);
EXPORT_SYMBOL(crypto_encrypt_init_prov);
EXPORT_SYMBOL(crypto_encrypt_init);
EXPORT_SYMBOL(crypto_encrypt_update);
EXPORT_SYMBOL(crypto_encrypt_final);
EXPORT_SYMBOL(crypto_decrypt_prov);
EXPORT_SYMBOL(crypto_decrypt);
EXPORT_SYMBOL(crypto_decrypt_init_prov);
EXPORT_SYMBOL(crypto_decrypt_init);
EXPORT_SYMBOL(crypto_decrypt_update);
EXPORT_SYMBOL(crypto_decrypt_final);
EXPORT_SYMBOL(crypto_encrypt_single);
EXPORT_SYMBOL(crypto_decrypt_single);
#endif

151
module/icp/api/kcf_ctxops.c Normal file
View File

@ -0,0 +1,151 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <sys/zfs_context.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/api.h>
#include <sys/crypto/spi.h>
#include <sys/crypto/sched_impl.h>
/*
* Crypto contexts manipulation routines
*/
/*
* crypto_create_ctx_template()
*
* Arguments:
*
* mech: crypto_mechanism_t pointer.
* mech_type is a valid value previously returned by
* crypto_mech2id();
* When the mech's parameter is not NULL, its definition depends
* on the standard definition of the mechanism.
* key: pointer to a crypto_key_t structure.
* ptmpl: a storage for the opaque crypto_ctx_template_t, allocated and
* initialized by the software provider this routine is
* dispatched to.
* kmflag: KM_SLEEP/KM_NOSLEEP mem. alloc. flag.
*
* Description:
* Redirects the call to the software provider of the specified
* mechanism. That provider will allocate and pre-compute/pre-expand
* the context template, reusable by later calls to crypto_xxx_init().
* The size and address of that provider context template are stored
* in an internal structure, kcf_ctx_template_t. The address of that
* structure is given back to the caller in *ptmpl.
*
* Context:
* Process or interrupt.
*
* Returns:
* CRYPTO_SUCCESS when the context template is successfully created.
 * CRYPTO_HOST_MEMORY: memory allocation failure
* CRYPTO_ARGUMENTS_BAD: NULL storage for the ctx template.
 * CRYPTO_MECHANISM_INVALID: invalid mechanism 'mech'.
*/
int
crypto_create_ctx_template(crypto_mechanism_t *mech, crypto_key_t *key,
    crypto_ctx_template_t *ptmpl, int kmflag)
{
	kcf_mech_entry_t *me;
	kcf_provider_desc_t *pd;
	kcf_ctx_template_t *tmpl_impl;
	crypto_mechanism_t prov_mech;
	int rv;

	/* Argument sanity: a place to store the template, and a mechanism. */
	if (ptmpl == NULL)
		return (CRYPTO_ARGUMENTS_BAD);
	if (mech == NULL)
		return (CRYPTO_MECHANISM_INVALID);

	/* Find the software provider for this mechanism; pd comes back held. */
	rv = kcf_get_sw_prov(mech->cm_type, &pd, &me, B_TRUE);
	if (rv != CRYPTO_SUCCESS)
		return (rv);

	tmpl_impl = (kcf_ctx_template_t *)kmem_alloc(
	    sizeof (kcf_ctx_template_t), kmflag);
	if (tmpl_impl == NULL) {
		KCF_PROV_REFRELE(pd);
		return (CRYPTO_HOST_MEMORY);
	}

	/* Pass a mechtype that the provider understands */
	prov_mech.cm_type = KCF_TO_PROV_MECHNUM(pd, mech->cm_type);
	prov_mech.cm_param = mech->cm_param;
	prov_mech.cm_param_len = mech->cm_param_len;

	/* Let the provider pre-expand/pre-compute its opaque template. */
	rv = KCF_PROV_CREATE_CTX_TEMPLATE(pd, &prov_mech, key,
	    &(tmpl_impl->ct_prov_tmpl), &(tmpl_impl->ct_size),
	    KCF_RHNDL(kmflag));
	if (rv != CRYPTO_SUCCESS) {
		kmem_free(tmpl_impl, sizeof (kcf_ctx_template_t));
	} else {
		/* Stamp with the SW provider generation for later checks. */
		tmpl_impl->ct_generation = me->me_gen_swprov;
		*ptmpl = tmpl_impl;
	}
	KCF_PROV_REFRELE(pd);

	return (rv);
}
/*
* crypto_destroy_ctx_template()
*
* Arguments:
*
* tmpl: an opaque crypto_ctx_template_t previously created by
* crypto_create_ctx_template()
*
* Description:
 * Frees the embedded crypto_spi_ctx_template_t, then the
* kcf_ctx_template_t.
*
* Context:
* Process or interrupt.
*
*/
void
crypto_destroy_ctx_template(crypto_ctx_template_t tmpl)
{
	kcf_ctx_template_t *tmpl_impl = (kcf_ctx_template_t *)tmpl;

	/* Destroying a NULL template is a no-op. */
	if (tmpl_impl == NULL)
		return;

	ASSERT(tmpl_impl->ct_prov_tmpl != NULL);

	/* Scrub the provider's template before freeing it. */
	bzero(tmpl_impl->ct_prov_tmpl, tmpl_impl->ct_size);
	kmem_free(tmpl_impl->ct_prov_tmpl, tmpl_impl->ct_size);
	kmem_free(tmpl_impl, sizeof (kcf_ctx_template_t));
}
#if defined(_KERNEL) && defined(HAVE_SPL)
/* Export the context-template API to other kernel modules. */
EXPORT_SYMBOL(crypto_create_ctx_template);
EXPORT_SYMBOL(crypto_destroy_ctx_template);
#endif

494
module/icp/api/kcf_digest.c Normal file
View File

@ -0,0 +1,494 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <sys/zfs_context.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/api.h>
#include <sys/crypto/spi.h>
#include <sys/crypto/sched_impl.h>
#define CRYPTO_OPS_OFFSET(f) offsetof(crypto_ops_t, co_##f)
#define CRYPTO_DIGEST_OFFSET(f) offsetof(crypto_digest_ops_t, f)
/*
* Message digest routines
*/
/*
* The following are the possible returned values common to all the routines
* below. The applicability of some of these return values depends on the
* presence of the arguments.
*
* CRYPTO_SUCCESS: The operation completed successfully.
* CRYPTO_QUEUED: A request was submitted successfully. The callback
* routine will be called when the operation is done.
* CRYPTO_MECHANISM_INVALID or CRYPTO_INVALID_MECH_PARAM
* for problems with the 'mech'.
* CRYPTO_INVALID_DATA for bogus 'data'
* CRYPTO_HOST_MEMORY for failure to allocate memory to handle this work.
* CRYPTO_INVALID_CONTEXT: Not a valid context.
* CRYPTO_BUSY: Cannot process the request now. Schedule a
* crypto_bufcall(), or try later.
* CRYPTO_NOT_SUPPORTED and CRYPTO_MECH_NOT_SUPPORTED:
* No provider is capable of a function or a mechanism.
*/
/*
* crypto_digest_prov()
*
* Arguments:
* pd: pointer to the descriptor of the provider to use for this
* operation.
* sid: provider session id.
* mech: crypto_mechanism_t pointer.
* mech_type is a valid value previously returned by
* crypto_mech2id();
* When the mech's parameter is not NULL, its definition depends
* on the standard definition of the mechanism.
* data: The message to be digested.
* digest: Storage for the digest. The length needed depends on the
* mechanism.
* cr: crypto_call_req_t calling conditions and call back info.
*
* Description:
* Asynchronously submits a request for, or synchronously performs the
* digesting operation of 'data' on the specified
* provider with the specified session.
* When complete and successful, 'digest' will contain the digest value.
* The caller should hold a reference on the specified provider
* descriptor before calling this function.
*
* Context:
* Process or interrupt, according to the semantics dictated by the 'cr'.
*
* Returns:
* See comment in the beginning of the file.
*/
int
crypto_digest_prov(crypto_provider_t provider, crypto_session_id_t sid,
    crypto_mechanism_t *mech, crypto_data_t *data, crypto_data_t *digest,
    crypto_call_req_t *crq)
{
	kcf_provider_desc_t *pd = provider;
	kcf_provider_desc_t *real_provider = pd;
	boolean_t is_logical = (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER);
	kcf_req_params_t params;
	int rv;

	/* Caller must already hold a reference on the provider descriptor. */
	ASSERT(KCF_PROV_REFHELD(pd));

	/*
	 * A logical provider only fronts for its members; pick a hardware
	 * member capable of atomic digests. On success 'real_provider'
	 * comes back held.
	 */
	if (is_logical) {
		rv = kcf_get_hardware_provider(mech->cm_type,
		    CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq),
		    pd, &real_provider, CRYPTO_FG_DIGEST_ATOMIC);
		if (rv != CRYPTO_SUCCESS)
			return (rv);
	}

	/* no crypto context to carry between multiple parts. */
	KCF_WRAP_DIGEST_OPS_PARAMS(&params, KCF_OP_ATOMIC, sid, mech, NULL,
	    data, digest);
	rv = kcf_submit_request(real_provider, NULL, crq, &params, B_FALSE);

	if (is_logical)
		KCF_PROV_REFRELE(real_provider);

	return (rv);
}
/*
* Same as crypto_digest_prov(), but relies on the KCF scheduler to
* choose a provider. See crypto_digest_prov() comments for more information.
*/
int
crypto_digest(crypto_mechanism_t *mech, crypto_data_t *data,
    crypto_data_t *digest, crypto_call_req_t *crq)
{
	int error;
	kcf_provider_desc_t *pd;
	kcf_req_params_t params;
	/* Providers that already failed recoverably for this request. */
	kcf_prov_tried_t *list = NULL;

retry:
	/* The pd is returned held */
	if ((pd = kcf_get_mech_provider(mech->cm_type, NULL, &error, list,
	    CRYPTO_FG_DIGEST_ATOMIC, CHECK_RESTRICT(crq),
	    data->cd_length)) == NULL) {
		if (list != NULL)
			kcf_free_triedlist(list);
		return (error);
	}

	/* The fast path for SW providers. */
	if (CHECK_FASTPATH(crq, pd)) {
		crypto_mechanism_t lmech;

		/* Call the SPI directly with a provider-local mech copy. */
		lmech = *mech;
		KCF_SET_PROVIDER_MECHNUM(mech->cm_type, pd, &lmech);
		error = KCF_PROV_DIGEST_ATOMIC(pd, pd->pd_sid, &lmech, data,
		    digest, KCF_SWFP_RHNDL(crq));
		KCF_PROV_INCRSTATS(pd, error);
	} else {
		/*
		 * HW providers flagged CRYPTO_HASH_NO_UPDATE cannot do
		 * multi-part digests; reject inputs over their single-part
		 * limit.
		 */
		if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
		    (pd->pd_flags & CRYPTO_HASH_NO_UPDATE) &&
		    (data->cd_length > pd->pd_hash_limit)) {
			error = CRYPTO_BUFFER_TOO_BIG;
		} else {
			KCF_WRAP_DIGEST_OPS_PARAMS(&params, KCF_OP_ATOMIC,
			    pd->pd_sid, mech, NULL, data, digest);

			/* no crypto context to carry between multiple parts. */
			error = kcf_submit_request(pd, NULL, crq, &params,
			    B_FALSE);
		}
	}

	if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED &&
	    IS_RECOVERABLE(error)) {
		/* Add pd to the linked list of providers tried. */
		if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL)
			goto retry;
	}

	if (list != NULL)
		kcf_free_triedlist(list);

	KCF_PROV_REFRELE(pd);
	return (error);
}
/*
* crypto_digest_init_prov()
*
* pd: pointer to the descriptor of the provider to use for this
* operation.
* sid: provider session id.
* mech: crypto_mechanism_t pointer.
* mech_type is a valid value previously returned by
* crypto_mech2id();
* When the mech's parameter is not NULL, its definition depends
* on the standard definition of the mechanism.
* ctxp: Pointer to a crypto_context_t.
* cr: crypto_call_req_t calling conditions and call back info.
*
* Description:
* Asynchronously submits a request for, or synchronously performs the
* initialization of a message digest operation on the specified
* provider with the specified session.
* When complete and successful, 'ctxp' will contain a crypto_context_t
* valid for later calls to digest_update() and digest_final().
* The caller should hold a reference on the specified provider
* descriptor before calling this function.
*/
int
crypto_digest_init_prov(crypto_provider_t provider, crypto_session_id_t sid,
    crypto_mechanism_t *mech, crypto_context_t *ctxp, crypto_call_req_t *crq)
{
	int error;
	crypto_ctx_t *ctx;
	kcf_req_params_t params;
	kcf_provider_desc_t *pd = provider;
	kcf_provider_desc_t *real_provider = pd;
	/* Caller must already hold a reference on the provider descriptor. */
	ASSERT(KCF_PROV_REFHELD(pd));
	/*
	 * Route a logical provider to an underlying hardware provider that
	 * supports multi-part digests; real_provider comes back held.
	 */
	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
		error = kcf_get_hardware_provider(mech->cm_type,
		    CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq), pd,
		    &real_provider, CRYPTO_FG_DIGEST);
		if (error != CRYPTO_SUCCESS)
			return (error);
	}
	/* Allocate and initialize the canonical context */
	if ((ctx = kcf_new_ctx(crq, real_provider, sid)) == NULL) {
		if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
			KCF_PROV_REFRELE(real_provider);
		return (CRYPTO_HOST_MEMORY);
	}
	/* The fast path for SW providers. */
	if (CHECK_FASTPATH(crq, pd)) {
		crypto_mechanism_t lmech;
		lmech = *mech;
		/* Map the framework mech number into the provider's space. */
		KCF_SET_PROVIDER_MECHNUM(mech->cm_type, real_provider, &lmech);
		error = KCF_PROV_DIGEST_INIT(real_provider, ctx, &lmech,
		    KCF_SWFP_RHNDL(crq));
		KCF_PROV_INCRSTATS(pd, error);
	} else {
		KCF_WRAP_DIGEST_OPS_PARAMS(&params, KCF_OP_INIT, sid,
		    mech, NULL, NULL, NULL);
		error = kcf_submit_request(real_provider, ctx, crq, &params,
		    B_FALSE);
	}
	/* Drop the hold taken by kcf_get_hardware_provider() above. */
	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
		KCF_PROV_REFRELE(real_provider);
	/* Hand the new context to the caller on success or queued async. */
	if ((error == CRYPTO_SUCCESS) || (error == CRYPTO_QUEUED))
		*ctxp = (crypto_context_t)ctx;
	else {
		/* Release the hold done in kcf_new_ctx(). */
		KCF_CONTEXT_REFRELE((kcf_context_t *)ctx->cc_framework_private);
	}
	return (error);
}
/*
* Same as crypto_digest_init_prov(), but relies on the KCF scheduler
* to choose a provider. See crypto_digest_init_prov() comments for
* more information.
*/
int
crypto_digest_init(crypto_mechanism_t *mech, crypto_context_t *ctxp,
    crypto_call_req_t *crq)
{
	int error;
	kcf_provider_desc_t *pd;
	kcf_prov_tried_t *list = NULL;	/* providers tried so far this call */
retry:
	/* The pd is returned held */
	if ((pd = kcf_get_mech_provider(mech->cm_type, NULL, &error,
	    list, CRYPTO_FG_DIGEST, CHECK_RESTRICT(crq), 0)) == NULL) {
		if (list != NULL)
			kcf_free_triedlist(list);
		return (error);
	}
	if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
	    (pd->pd_flags & CRYPTO_HASH_NO_UPDATE)) {
		/*
		 * The hardware provider has limited digest support.
		 * So, we fallback early here to using a software provider.
		 *
		 * XXX - need to enhance to do the fallback later in
		 * crypto_digest_update() if the size of accumulated input data
		 * exceeds the maximum size digestable by hardware provider.
		 */
		error = CRYPTO_BUFFER_TOO_BIG;
	} else {
		/* Delegate the real work to the provider-directed variant. */
		error = crypto_digest_init_prov(pd, pd->pd_sid,
		    mech, ctxp, crq);
	}
	/* On a recoverable failure, remember this pd and try another. */
	if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED &&
	    IS_RECOVERABLE(error)) {
		/* Add pd to the linked list of providers tried. */
		if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL)
			goto retry;
	}
	if (list != NULL)
		kcf_free_triedlist(list);
	KCF_PROV_REFRELE(pd);
	return (error);
}
/*
* crypto_digest_update()
*
* Arguments:
* context: A crypto_context_t initialized by digest_init().
* data: The part of message to be digested.
* cr: crypto_call_req_t calling conditions and call back info.
*
* Description:
* Asynchronously submits a request for, or synchronously performs a
* part of a message digest operation.
*
* Context:
* Process or interrupt, according to the semantics dictated by the 'cr'.
*
* Returns:
* See comment in the beginning of the file.
*/
int
crypto_digest_update(crypto_context_t context, crypto_data_t *data,
    crypto_call_req_t *cr)
{
	crypto_ctx_t *ctx = (crypto_ctx_t *)context;
	kcf_context_t *fwk_ctx;
	kcf_provider_desc_t *prov;
	kcf_req_params_t req;
	int rv;

	/* Validate the context chain before dereferencing any of it. */
	if (ctx == NULL)
		return (CRYPTO_INVALID_CONTEXT);
	fwk_ctx = (kcf_context_t *)ctx->cc_framework_private;
	if (fwk_ctx == NULL)
		return (CRYPTO_INVALID_CONTEXT);
	prov = fwk_ctx->kc_prov_desc;
	if (prov == NULL)
		return (CRYPTO_INVALID_CONTEXT);

	ASSERT(prov->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);

	if (CHECK_FASTPATH(cr, prov)) {
		/* SW provider fast path: call the SPI entry point directly. */
		rv = KCF_PROV_DIGEST_UPDATE(prov, ctx, data, NULL);
		KCF_PROV_INCRSTATS(prov, rv);
	} else {
		/* Otherwise hand the update off to the KCF scheduler. */
		KCF_WRAP_DIGEST_OPS_PARAMS(&req, KCF_OP_UPDATE,
		    ctx->cc_session, NULL, NULL, data, NULL);
		rv = kcf_submit_request(prov, ctx, cr, &req, B_FALSE);
	}

	return (rv);
}
/*
* crypto_digest_final()
*
* Arguments:
* context: A crypto_context_t initialized by digest_init().
* digest: The storage for the digest.
* cr: crypto_call_req_t calling conditions and call back info.
*
* Description:
* Asynchronously submits a request for, or synchronously performs the
* final part of a message digest operation.
*
* Context:
* Process or interrupt, according to the semantics dictated by the 'cr'.
*
* Returns:
* See comment in the beginning of the file.
*/
int
crypto_digest_final(crypto_context_t context, crypto_data_t *digest,
    crypto_call_req_t *cr)
{
	crypto_ctx_t *ctx = (crypto_ctx_t *)context;
	kcf_context_t *fwk_ctx;
	kcf_provider_desc_t *prov;
	kcf_req_params_t req;
	int rv;

	/* Reject a torn-down or never-initialized context. */
	if (ctx == NULL)
		return (CRYPTO_INVALID_CONTEXT);
	fwk_ctx = (kcf_context_t *)ctx->cc_framework_private;
	if (fwk_ctx == NULL)
		return (CRYPTO_INVALID_CONTEXT);
	prov = fwk_ctx->kc_prov_desc;
	if (prov == NULL)
		return (CRYPTO_INVALID_CONTEXT);

	ASSERT(prov->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);

	if (CHECK_FASTPATH(cr, prov)) {
		/* SW provider fast path: call the SPI entry point directly. */
		rv = KCF_PROV_DIGEST_FINAL(prov, ctx, digest, NULL);
		KCF_PROV_INCRSTATS(prov, rv);
	} else {
		KCF_WRAP_DIGEST_OPS_PARAMS(&req, KCF_OP_FINAL,
		    ctx->cc_session, NULL, NULL, NULL, digest);
		rv = kcf_submit_request(prov, ctx, cr, &req, B_FALSE);
	}

	/* Release the hold done in kcf_new_ctx() during init step. */
	KCF_CONTEXT_COND_RELEASE(rv, fwk_ctx);
	return (rv);
}
/*
* Performs a digest update on the specified key. Note that there is
* no k-API crypto_digest_key() equivalent of this function.
*/
int
crypto_digest_key_prov(crypto_context_t context, crypto_key_t *key,
    crypto_call_req_t *cr)
{
	crypto_ctx_t *ctx = (crypto_ctx_t *)context;
	kcf_context_t *fwk_ctx;
	kcf_provider_desc_t *prov;
	kcf_req_params_t req;
	int rv;

	/* Validate the context chain before dereferencing any of it. */
	if (ctx == NULL)
		return (CRYPTO_INVALID_CONTEXT);
	fwk_ctx = (kcf_context_t *)ctx->cc_framework_private;
	if (fwk_ctx == NULL)
		return (CRYPTO_INVALID_CONTEXT);
	prov = fwk_ctx->kc_prov_desc;
	if (prov == NULL)
		return (CRYPTO_INVALID_CONTEXT);

	ASSERT(prov->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);

	if (CHECK_FASTPATH(cr, prov)) {
		/* SW provider fast path: digest the key material directly. */
		rv = KCF_PROV_DIGEST_KEY(prov, ctx, key, NULL);
		KCF_PROV_INCRSTATS(prov, rv);
	} else {
		/* Otherwise queue a DIGEST_KEY op with the scheduler. */
		KCF_WRAP_DIGEST_OPS_PARAMS(&req, KCF_OP_DIGEST_KEY,
		    ctx->cc_session, NULL, key, NULL, NULL);
		rv = kcf_submit_request(prov, ctx, cr, &req, B_FALSE);
	}

	return (rv);
}
/*
* See comments for crypto_digest_update() and crypto_digest_final().
*/
int
crypto_digest_single(crypto_context_t context, crypto_data_t *data,
    crypto_data_t *digest, crypto_call_req_t *cr)
{
	crypto_ctx_t *ctx = (crypto_ctx_t *)context;
	kcf_context_t *fwk_ctx;
	kcf_provider_desc_t *prov;
	kcf_req_params_t req;
	int rv;

	/* Validate the context chain before dereferencing any of it. */
	if (ctx == NULL)
		return (CRYPTO_INVALID_CONTEXT);
	fwk_ctx = (kcf_context_t *)ctx->cc_framework_private;
	if (fwk_ctx == NULL)
		return (CRYPTO_INVALID_CONTEXT);
	prov = fwk_ctx->kc_prov_desc;
	if (prov == NULL)
		return (CRYPTO_INVALID_CONTEXT);

	if (CHECK_FASTPATH(cr, prov)) {
		/* SW provider fast path: single-part digest in one call. */
		rv = KCF_PROV_DIGEST(prov, ctx, data, digest, NULL);
		KCF_PROV_INCRSTATS(prov, rv);
	} else {
		KCF_WRAP_DIGEST_OPS_PARAMS(&req, KCF_OP_SINGLE, prov->pd_sid,
		    NULL, NULL, data, digest);
		rv = kcf_submit_request(prov, ctx, cr, &req, B_FALSE);
	}

	/* Release the hold done in kcf_new_ctx() during init step. */
	KCF_CONTEXT_COND_RELEASE(rv, fwk_ctx);
	return (rv);
}
#if defined(_KERNEL) && defined(HAVE_SPL)
/* Export the digest entry points to other kernel modules (e.g. ZFS). */
EXPORT_SYMBOL(crypto_digest_prov);
EXPORT_SYMBOL(crypto_digest);
EXPORT_SYMBOL(crypto_digest_init_prov);
EXPORT_SYMBOL(crypto_digest_init);
EXPORT_SYMBOL(crypto_digest_update);
EXPORT_SYMBOL(crypto_digest_final);
EXPORT_SYMBOL(crypto_digest_key_prov);
EXPORT_SYMBOL(crypto_digest_single);
#endif

648
module/icp/api/kcf_mac.c Normal file
View File

@ -0,0 +1,648 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <sys/zfs_context.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/api.h>
#include <sys/crypto/spi.h>
#include <sys/crypto/sched_impl.h>
/* Byte offsets used to locate the MAC ops vector within a provider. */
#define	CRYPTO_OPS_OFFSET(f)		offsetof(crypto_ops_t, co_##f)
#define	CRYPTO_MAC_OFFSET(f)		offsetof(crypto_mac_ops_t, f)
/*
* Message authentication codes routines.
*/
/*
* The following are the possible returned values common to all the routines
* below. The applicability of some of these return values depends on the
* presence of the arguments.
*
* CRYPTO_SUCCESS: The operation completed successfully.
* CRYPTO_QUEUED: A request was submitted successfully. The callback
* routine will be called when the operation is done.
* CRYPTO_INVALID_MECH_NUMBER, CRYPTO_INVALID_MECH_PARAM, or
* CRYPTO_INVALID_MECH for problems with the 'mech'.
* CRYPTO_INVALID_DATA for bogus 'data'
* CRYPTO_HOST_MEMORY for failure to allocate memory to handle this work.
* CRYPTO_INVALID_CONTEXT: Not a valid context.
* CRYPTO_BUSY: Cannot process the request now. Schedule a
* crypto_bufcall(), or try later.
* CRYPTO_NOT_SUPPORTED and CRYPTO_MECH_NOT_SUPPORTED: No provider is
* capable of a function or a mechanism.
* CRYPTO_INVALID_KEY: bogus 'key' argument.
* CRYPTO_INVALID_MAC: bogus 'mac' argument.
*/
/*
* crypto_mac_prov()
*
* Arguments:
* mech: crypto_mechanism_t pointer.
* mech_type is a valid value previously returned by
* crypto_mech2id();
* When the mech's parameter is not NULL, its definition depends
* on the standard definition of the mechanism.
* key: pointer to a crypto_key_t structure.
* data: The message to compute the MAC for.
* mac: Storage for the MAC. The length needed depends on the mechanism.
* tmpl: a crypto_ctx_template_t, opaque template of a context of a
* MAC with the 'mech' using 'key'. 'tmpl' is created by
* a previous call to crypto_create_ctx_template().
* cr: crypto_call_req_t calling conditions and call back info.
*
* Description:
* Asynchronously submits a request for, or synchronously performs a
* single-part message authentication of 'data' with the mechanism
 * 'mech', using the key 'key', on the specified provider with
* the specified session id.
* When complete and successful, 'mac' will contain the message
* authentication code.
*
* Context:
* Process or interrupt, according to the semantics dictated by the 'crq'.
*
* Returns:
* See comment in the beginning of the file.
*/
int
crypto_mac_prov(crypto_provider_t provider, crypto_session_id_t sid,
    crypto_mechanism_t *mech, crypto_data_t *data, crypto_key_t *key,
    crypto_ctx_template_t tmpl, crypto_data_t *mac, crypto_call_req_t *crq)
{
	kcf_req_params_t params;
	kcf_provider_desc_t *pd = provider;
	kcf_provider_desc_t *real_provider = pd;
	int rv;
	/* Caller must already hold a reference on the provider descriptor. */
	ASSERT(KCF_PROV_REFHELD(pd));
	/*
	 * A logical provider is only a front end; route the request to an
	 * underlying hardware provider capable of atomic MACs. On success,
	 * real_provider is returned held and is released below.
	 */
	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
		rv = kcf_get_hardware_provider(mech->cm_type,
		    CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq), pd,
		    &real_provider, CRYPTO_FG_MAC_ATOMIC);
		if (rv != CRYPTO_SUCCESS)
			return (rv);
	}
	/* Single-part operation: no context carried between calls. */
	KCF_WRAP_MAC_OPS_PARAMS(&params, KCF_OP_ATOMIC, sid, mech, key,
	    data, mac, tmpl);
	rv = kcf_submit_request(real_provider, NULL, crq, &params, B_FALSE);
	/* Drop the hold taken by kcf_get_hardware_provider() above. */
	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
		KCF_PROV_REFRELE(real_provider);
	return (rv);
}
/*
* Same as crypto_mac_prov(), but relies on the KCF scheduler to choose
 * a provider. See crypto_mac_prov() comments for more information.
*/
int
crypto_mac(crypto_mechanism_t *mech, crypto_data_t *data,
    crypto_key_t *key, crypto_ctx_template_t tmpl, crypto_data_t *mac,
    crypto_call_req_t *crq)
{
	int error;
	kcf_mech_entry_t *me;
	kcf_req_params_t params;
	kcf_provider_desc_t *pd;
	kcf_ctx_template_t *ctx_tmpl;
	crypto_spi_ctx_template_t spi_ctx_tmpl = NULL;
	kcf_prov_tried_t *list = NULL;	/* providers tried so far this call */
retry:
	/* The pd is returned held */
	if ((pd = kcf_get_mech_provider(mech->cm_type, &me, &error,
	    list, CRYPTO_FG_MAC_ATOMIC, CHECK_RESTRICT(crq),
	    data->cd_length)) == NULL) {
		if (list != NULL)
			kcf_free_triedlist(list);
		return (error);
	}
	/*
	 * For SW providers, check the validity of the context template
	 * It is very rare that the generation number mis-matches, so
	 * is acceptable to fail here, and let the consumer recover by
	 * freeing this tmpl and create a new one for the key and new SW
	 * provider
	 */
	if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) &&
	    ((ctx_tmpl = (kcf_ctx_template_t *)tmpl) != NULL)) {
		if (ctx_tmpl->ct_generation != me->me_gen_swprov) {
			if (list != NULL)
				kcf_free_triedlist(list);
			KCF_PROV_REFRELE(pd);
			return (CRYPTO_OLD_CTX_TEMPLATE);
		} else {
			spi_ctx_tmpl = ctx_tmpl->ct_prov_tmpl;
		}
	}
	/* The fast path for SW providers. */
	if (CHECK_FASTPATH(crq, pd)) {
		crypto_mechanism_t lmech;
		/* Translate the mech number into the provider's own space. */
		lmech = *mech;
		KCF_SET_PROVIDER_MECHNUM(mech->cm_type, pd, &lmech);
		error = KCF_PROV_MAC_ATOMIC(pd, pd->pd_sid, &lmech, key, data,
		    mac, spi_ctx_tmpl, KCF_SWFP_RHNDL(crq));
		KCF_PROV_INCRSTATS(pd, error);
	} else {
		/* HW providers with no update support have a size limit. */
		if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
		    (pd->pd_flags & CRYPTO_HASH_NO_UPDATE) &&
		    (data->cd_length > pd->pd_hash_limit)) {
			/*
			 * XXX - We need a check to see if this is indeed
			 * a HMAC. So far, all kernel clients use
			 * this interface only for HMAC. So, this is fine
			 * for now.
			 */
			error = CRYPTO_BUFFER_TOO_BIG;
		} else {
			KCF_WRAP_MAC_OPS_PARAMS(&params, KCF_OP_ATOMIC,
			    pd->pd_sid, mech, key, data, mac, spi_ctx_tmpl);
			error = kcf_submit_request(pd, NULL, crq, &params,
			    KCF_ISDUALREQ(crq));
		}
	}
	/* On a recoverable failure, remember this pd and try another. */
	if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED &&
	    IS_RECOVERABLE(error)) {
		/* Add pd to the linked list of providers tried. */
		if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL)
			goto retry;
	}
	if (list != NULL)
		kcf_free_triedlist(list);
	KCF_PROV_REFRELE(pd);
	return (error);
}
/*
* Single part operation to compute the MAC corresponding to the specified
* 'data' and to verify that it matches the MAC specified by 'mac'.
* The other arguments are the same as the function crypto_mac_prov().
*/
int
crypto_mac_verify_prov(crypto_provider_t provider, crypto_session_id_t sid,
    crypto_mechanism_t *mech, crypto_data_t *data, crypto_key_t *key,
    crypto_ctx_template_t tmpl, crypto_data_t *mac, crypto_call_req_t *crq)
{
	kcf_req_params_t params;
	kcf_provider_desc_t *pd = provider;
	kcf_provider_desc_t *real_provider = pd;
	int rv;
	/* Caller must already hold a reference on the provider descriptor. */
	ASSERT(KCF_PROV_REFHELD(pd));
	/*
	 * Route a logical provider to an underlying hardware provider
	 * capable of atomic MACs; real_provider comes back held.
	 */
	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
		rv = kcf_get_hardware_provider(mech->cm_type,
		    CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq), pd,
		    &real_provider, CRYPTO_FG_MAC_ATOMIC);
		if (rv != CRYPTO_SUCCESS)
			return (rv);
	}
	/* Compute-and-compare in one atomic operation. */
	KCF_WRAP_MAC_OPS_PARAMS(&params, KCF_OP_MAC_VERIFY_ATOMIC, sid, mech,
	    key, data, mac, tmpl);
	rv = kcf_submit_request(real_provider, NULL, crq, &params, B_FALSE);
	/* Drop the hold taken by kcf_get_hardware_provider() above. */
	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
		KCF_PROV_REFRELE(real_provider);
	return (rv);
}
/*
* Same as crypto_mac_verify_prov(), but relies on the KCF scheduler to choose
* a provider. See crypto_mac_verify_prov() comments for more information.
*/
int
crypto_mac_verify(crypto_mechanism_t *mech, crypto_data_t *data,
    crypto_key_t *key, crypto_ctx_template_t tmpl, crypto_data_t *mac,
    crypto_call_req_t *crq)
{
	int error;
	kcf_mech_entry_t *me;
	kcf_req_params_t params;
	kcf_provider_desc_t *pd;
	kcf_ctx_template_t *ctx_tmpl;
	crypto_spi_ctx_template_t spi_ctx_tmpl = NULL;
	kcf_prov_tried_t *list = NULL;	/* providers tried so far this call */
retry:
	/* The pd is returned held */
	if ((pd = kcf_get_mech_provider(mech->cm_type, &me, &error,
	    list, CRYPTO_FG_MAC_ATOMIC, CHECK_RESTRICT(crq),
	    data->cd_length)) == NULL) {
		if (list != NULL)
			kcf_free_triedlist(list);
		return (error);
	}
	/*
	 * For SW providers, check the validity of the context template
	 * It is very rare that the generation number mis-matches, so
	 * is acceptable to fail here, and let the consumer recover by
	 * freeing this tmpl and create a new one for the key and new SW
	 * provider
	 */
	if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) &&
	    ((ctx_tmpl = (kcf_ctx_template_t *)tmpl) != NULL)) {
		if (ctx_tmpl->ct_generation != me->me_gen_swprov) {
			if (list != NULL)
				kcf_free_triedlist(list);
			KCF_PROV_REFRELE(pd);
			return (CRYPTO_OLD_CTX_TEMPLATE);
		} else {
			spi_ctx_tmpl = ctx_tmpl->ct_prov_tmpl;
		}
	}
	/* The fast path for SW providers. */
	if (CHECK_FASTPATH(crq, pd)) {
		crypto_mechanism_t lmech;
		/* Translate the mech number into the provider's own space. */
		lmech = *mech;
		KCF_SET_PROVIDER_MECHNUM(mech->cm_type, pd, &lmech);
		error = KCF_PROV_MAC_VERIFY_ATOMIC(pd, pd->pd_sid, &lmech, key,
		    data, mac, spi_ctx_tmpl, KCF_SWFP_RHNDL(crq));
		KCF_PROV_INCRSTATS(pd, error);
	} else {
		/* HW providers with no update support have a size limit. */
		if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
		    (pd->pd_flags & CRYPTO_HASH_NO_UPDATE) &&
		    (data->cd_length > pd->pd_hash_limit)) {
			/* see comments in crypto_mac() */
			error = CRYPTO_BUFFER_TOO_BIG;
		} else {
			KCF_WRAP_MAC_OPS_PARAMS(&params,
			    KCF_OP_MAC_VERIFY_ATOMIC, pd->pd_sid, mech,
			    key, data, mac, spi_ctx_tmpl);
			error = kcf_submit_request(pd, NULL, crq, &params,
			    KCF_ISDUALREQ(crq));
		}
	}
	/* On a recoverable failure, remember this pd and try another. */
	if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED &&
	    IS_RECOVERABLE(error)) {
		/* Add pd to the linked list of providers tried. */
		if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL)
			goto retry;
	}
	if (list != NULL)
		kcf_free_triedlist(list);
	KCF_PROV_REFRELE(pd);
	return (error);
}
/*
* crypto_mac_init_prov()
*
* Arguments:
* pd: pointer to the descriptor of the provider to use for this
* operation.
* sid: provider session id.
* mech: crypto_mechanism_t pointer.
* mech_type is a valid value previously returned by
* crypto_mech2id();
* When the mech's parameter is not NULL, its definition depends
* on the standard definition of the mechanism.
* key: pointer to a crypto_key_t structure.
* tmpl: a crypto_ctx_template_t, opaque template of a context of a
* MAC with the 'mech' using 'key'. 'tmpl' is created by
* a previous call to crypto_create_ctx_template().
* ctxp: Pointer to a crypto_context_t.
* cr: crypto_call_req_t calling conditions and call back info.
*
* Description:
* Asynchronously submits a request for, or synchronously performs the
* initialization of a MAC operation on the specified provider with
* the specified session.
* When possible and applicable, will internally use the pre-computed MAC
* context from the context template, tmpl.
* When complete and successful, 'ctxp' will contain a crypto_context_t
* valid for later calls to mac_update() and mac_final().
* The caller should hold a reference on the specified provider
* descriptor before calling this function.
*
* Context:
* Process or interrupt, according to the semantics dictated by the 'cr'.
*
* Returns:
* See comment in the beginning of the file.
*/
int
crypto_mac_init_prov(crypto_provider_t provider, crypto_session_id_t sid,
    crypto_mechanism_t *mech, crypto_key_t *key, crypto_spi_ctx_template_t tmpl,
    crypto_context_t *ctxp, crypto_call_req_t *crq)
{
	int rv;
	crypto_ctx_t *ctx;
	kcf_req_params_t params;
	kcf_provider_desc_t *pd = provider;
	kcf_provider_desc_t *real_provider = pd;
	/* Caller must already hold a reference on the provider descriptor. */
	ASSERT(KCF_PROV_REFHELD(pd));
	/*
	 * Route a logical provider to an underlying hardware provider that
	 * supports multi-part MACs; real_provider comes back held.
	 */
	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
		rv = kcf_get_hardware_provider(mech->cm_type,
		    CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq), pd,
		    &real_provider, CRYPTO_FG_MAC);
		if (rv != CRYPTO_SUCCESS)
			return (rv);
	}
	/* Allocate and initialize the canonical context */
	if ((ctx = kcf_new_ctx(crq, real_provider, sid)) == NULL) {
		if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
			KCF_PROV_REFRELE(real_provider);
		return (CRYPTO_HOST_MEMORY);
	}
	/* The fast path for SW providers. */
	if (CHECK_FASTPATH(crq, pd)) {
		crypto_mechanism_t lmech;
		lmech = *mech;
		/* Map the framework mech number into the provider's space. */
		KCF_SET_PROVIDER_MECHNUM(mech->cm_type, real_provider, &lmech);
		rv = KCF_PROV_MAC_INIT(real_provider, ctx, &lmech, key, tmpl,
		    KCF_SWFP_RHNDL(crq));
		KCF_PROV_INCRSTATS(pd, rv);
	} else {
		KCF_WRAP_MAC_OPS_PARAMS(&params, KCF_OP_INIT, sid, mech, key,
		    NULL, NULL, tmpl);
		rv = kcf_submit_request(real_provider, ctx, crq, &params,
		    B_FALSE);
	}
	/* Drop the hold taken by kcf_get_hardware_provider() above. */
	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
		KCF_PROV_REFRELE(real_provider);
	/* Hand the new context to the caller on success or queued async. */
	if ((rv == CRYPTO_SUCCESS) || (rv == CRYPTO_QUEUED))
		*ctxp = (crypto_context_t)ctx;
	else {
		/* Release the hold done in kcf_new_ctx(). */
		KCF_CONTEXT_REFRELE((kcf_context_t *)ctx->cc_framework_private);
	}
	return (rv);
}
/*
* Same as crypto_mac_init_prov(), but relies on the KCF scheduler to
* choose a provider. See crypto_mac_init_prov() comments for more
* information.
*/
int
crypto_mac_init(crypto_mechanism_t *mech, crypto_key_t *key,
    crypto_ctx_template_t tmpl, crypto_context_t *ctxp,
    crypto_call_req_t *crq)
{
	int error;
	kcf_mech_entry_t *me;
	kcf_provider_desc_t *pd;
	kcf_ctx_template_t *ctx_tmpl;
	crypto_spi_ctx_template_t spi_ctx_tmpl = NULL;
	kcf_prov_tried_t *list = NULL;	/* providers tried so far this call */
retry:
	/* The pd is returned held */
	if ((pd = kcf_get_mech_provider(mech->cm_type, &me, &error,
	    list, CRYPTO_FG_MAC, CHECK_RESTRICT(crq), 0)) == NULL) {
		if (list != NULL)
			kcf_free_triedlist(list);
		return (error);
	}
	/*
	 * For SW providers, check the validity of the context template
	 * It is very rare that the generation number mis-matches, so
	 * is acceptable to fail here, and let the consumer recover by
	 * freeing this tmpl and create a new one for the key and new SW
	 * provider
	 */
	if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) &&
	    ((ctx_tmpl = (kcf_ctx_template_t *)tmpl) != NULL)) {
		if (ctx_tmpl->ct_generation != me->me_gen_swprov) {
			if (list != NULL)
				kcf_free_triedlist(list);
			KCF_PROV_REFRELE(pd);
			return (CRYPTO_OLD_CTX_TEMPLATE);
		} else {
			spi_ctx_tmpl = ctx_tmpl->ct_prov_tmpl;
		}
	}
	if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
	    (pd->pd_flags & CRYPTO_HASH_NO_UPDATE)) {
		/*
		 * The hardware provider has limited HMAC support.
		 * So, we fallback early here to using a software provider.
		 *
		 * XXX - need to enhance to do the fallback later in
		 * crypto_mac_update() if the size of accumulated input data
		 * exceeds the maximum size digestable by hardware provider.
		 */
		error = CRYPTO_BUFFER_TOO_BIG;
	} else {
		/* Delegate the real work to the provider-directed variant. */
		error = crypto_mac_init_prov(pd, pd->pd_sid, mech, key,
		    spi_ctx_tmpl, ctxp, crq);
	}
	/* On a recoverable failure, remember this pd and try another. */
	if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED &&
	    IS_RECOVERABLE(error)) {
		/* Add pd to the linked list of providers tried. */
		if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL)
			goto retry;
	}
	if (list != NULL)
		kcf_free_triedlist(list);
	KCF_PROV_REFRELE(pd);
	return (error);
}
/*
* crypto_mac_update()
*
* Arguments:
* context: A crypto_context_t initialized by mac_init().
* data: The message part to be MAC'ed
* cr: crypto_call_req_t calling conditions and call back info.
*
* Description:
* Asynchronously submits a request for, or synchronously performs a
* part of a MAC operation.
*
* Context:
* Process or interrupt, according to the semantics dictated by the 'cr'.
*
* Returns:
* See comment in the beginning of the file.
*/
int
crypto_mac_update(crypto_context_t context, crypto_data_t *data,
    crypto_call_req_t *cr)
{
	crypto_ctx_t *ctx = (crypto_ctx_t *)context;
	kcf_context_t *fwk_ctx;
	kcf_provider_desc_t *prov;
	kcf_req_params_t req;
	int rv;

	/* Validate the context chain before dereferencing any of it. */
	if (ctx == NULL)
		return (CRYPTO_INVALID_CONTEXT);
	fwk_ctx = (kcf_context_t *)ctx->cc_framework_private;
	if (fwk_ctx == NULL)
		return (CRYPTO_INVALID_CONTEXT);
	prov = fwk_ctx->kc_prov_desc;
	if (prov == NULL)
		return (CRYPTO_INVALID_CONTEXT);

	ASSERT(prov->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);

	if (CHECK_FASTPATH(cr, prov)) {
		/* SW provider fast path: call the SPI entry point directly. */
		rv = KCF_PROV_MAC_UPDATE(prov, ctx, data, NULL);
		KCF_PROV_INCRSTATS(prov, rv);
	} else {
		/* Otherwise hand the update off to the KCF scheduler. */
		KCF_WRAP_MAC_OPS_PARAMS(&req, KCF_OP_UPDATE,
		    ctx->cc_session, NULL, NULL, data, NULL, NULL);
		rv = kcf_submit_request(prov, ctx, cr, &req, B_FALSE);
	}

	return (rv);
}
/*
* crypto_mac_final()
*
* Arguments:
* context: A crypto_context_t initialized by mac_init().
* mac: Storage for the message authentication code.
* cr: crypto_call_req_t calling conditions and call back info.
*
* Description:
* Asynchronously submits a request for, or synchronously performs a
* part of a message authentication operation.
*
* Context:
* Process or interrupt, according to the semantics dictated by the 'cr'.
*
* Returns:
* See comment in the beginning of the file.
*/
int
crypto_mac_final(crypto_context_t context, crypto_data_t *mac,
    crypto_call_req_t *cr)
{
	crypto_ctx_t *ctx = (crypto_ctx_t *)context;
	kcf_context_t *fwk_ctx;
	kcf_provider_desc_t *prov;
	kcf_req_params_t req;
	int rv;

	/* Reject a torn-down or never-initialized context. */
	if (ctx == NULL)
		return (CRYPTO_INVALID_CONTEXT);
	fwk_ctx = (kcf_context_t *)ctx->cc_framework_private;
	if (fwk_ctx == NULL)
		return (CRYPTO_INVALID_CONTEXT);
	prov = fwk_ctx->kc_prov_desc;
	if (prov == NULL)
		return (CRYPTO_INVALID_CONTEXT);

	ASSERT(prov->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);

	if (CHECK_FASTPATH(cr, prov)) {
		/* SW provider fast path: call the SPI entry point directly. */
		rv = KCF_PROV_MAC_FINAL(prov, ctx, mac, NULL);
		KCF_PROV_INCRSTATS(prov, rv);
	} else {
		KCF_WRAP_MAC_OPS_PARAMS(&req, KCF_OP_FINAL,
		    ctx->cc_session, NULL, NULL, NULL, mac, NULL);
		rv = kcf_submit_request(prov, ctx, cr, &req, B_FALSE);
	}

	/* Release the hold done in kcf_new_ctx() during init step. */
	KCF_CONTEXT_COND_RELEASE(rv, fwk_ctx);
	return (rv);
}
/*
* See comments for crypto_mac_update() and crypto_mac_final().
*/
int
crypto_mac_single(crypto_context_t context, crypto_data_t *data,
    crypto_data_t *mac, crypto_call_req_t *cr)
{
	crypto_ctx_t *ctx = (crypto_ctx_t *)context;
	kcf_context_t *fwk_ctx;
	kcf_provider_desc_t *prov;
	kcf_req_params_t req;
	int rv;

	/* Validate the context chain before dereferencing any of it. */
	if (ctx == NULL)
		return (CRYPTO_INVALID_CONTEXT);
	fwk_ctx = (kcf_context_t *)ctx->cc_framework_private;
	if (fwk_ctx == NULL)
		return (CRYPTO_INVALID_CONTEXT);
	prov = fwk_ctx->kc_prov_desc;
	if (prov == NULL)
		return (CRYPTO_INVALID_CONTEXT);

	if (CHECK_FASTPATH(cr, prov)) {
		/* SW provider fast path: single-part MAC in one call. */
		rv = KCF_PROV_MAC(prov, ctx, data, mac, NULL);
		KCF_PROV_INCRSTATS(prov, rv);
	} else {
		KCF_WRAP_MAC_OPS_PARAMS(&req, KCF_OP_SINGLE, prov->pd_sid,
		    NULL, NULL, data, mac, NULL);
		rv = kcf_submit_request(prov, ctx, cr, &req, B_FALSE);
	}

	/* Release the hold done in kcf_new_ctx() during init step. */
	KCF_CONTEXT_COND_RELEASE(rv, fwk_ctx);
	return (rv);
}
#if defined(_KERNEL) && defined(HAVE_SPL)
/* Export the MAC entry points to other kernel modules (e.g. ZFS). */
EXPORT_SYMBOL(crypto_mac_prov);
EXPORT_SYMBOL(crypto_mac);
EXPORT_SYMBOL(crypto_mac_verify_prov);
EXPORT_SYMBOL(crypto_mac_verify);
EXPORT_SYMBOL(crypto_mac_init_prov);
EXPORT_SYMBOL(crypto_mac_init);
EXPORT_SYMBOL(crypto_mac_update);
EXPORT_SYMBOL(crypto_mac_final);
EXPORT_SYMBOL(crypto_mac_single);
#endif

View File

@ -0,0 +1,127 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <sys/zfs_context.h>
#include <sys/crypto/common.h>
#include <sys/crypto/api.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/sched_impl.h>
/*
 * All event subscribers are put on a list. ntfy_list_lock
 * protects changes to this list.
 *
 * The following locking order is maintained in the code - The
 * global ntfy_list_lock followed by the individual lock
 * in a kcf_ntfy_elem structure (kn_lock).
 */
kmutex_t ntfy_list_lock;
kcondvar_t ntfy_list_cv;   /* cv the service thread waits on */
static kcf_ntfy_elem_t *ntfy_list_head;	/* head of the subscriber list */
/*
* crypto_mech2id()
*
* Arguments:
* . mechname: A null-terminated string identifying the mechanism name.
*
* Description:
* Walks the mechanisms tables, looking for an entry that matches the
* mechname. Once it find it, it builds the 64-bit mech_type and returns
* it. If there are no hardware or software providers for the mechanism,
* but there is an unloaded software provider, this routine will attempt
* to load it.
*
* Context:
* Process and interruption.
*
* Returns:
* The unique mechanism identified by 'mechname', if found.
* CRYPTO_MECH_INVALID otherwise.
*/
crypto_mech_type_t
crypto_mech2id(char *mechname)
{
	/*
	 * Delegate to the common lookup. Per the header comment above,
	 * the B_TRUE flag presumably permits loading an unloaded SW
	 * provider during the lookup — confirm in crypto_mech2id_common().
	 */
	return (crypto_mech2id_common(mechname, B_TRUE));
}
/*
* We walk the notification list and do the callbacks.
*/
void
kcf_walk_ntfylist(uint32_t event, void *event_arg)
{
	kcf_ntfy_elem_t *nep;
	int nelem = 0;
	mutex_enter(&ntfy_list_lock);
	/*
	 * Count how many clients are on the notification list. We need
	 * this count to ensure that clients which joined the list after we
	 * have started this walk, are not wrongly notified.
	 */
	for (nep = ntfy_list_head; nep != NULL; nep = nep->kn_next)
		nelem++;
	for (nep = ntfy_list_head; (nep != NULL && nelem); nep = nep->kn_next) {
		nelem--;
		/*
		 * Check if this client is interested in the
		 * event.
		 */
		if (!(nep->kn_event_mask & event))
			continue;
		/* Mark the element busy so waiters can tell we are in it. */
		mutex_enter(&nep->kn_lock);
		nep->kn_state = NTFY_RUNNING;
		mutex_exit(&nep->kn_lock);
		/* Drop the list lock before calling out (see below). */
		mutex_exit(&ntfy_list_lock);
		/*
		 * We invoke the callback routine with no locks held. Another
		 * client could have joined the list meanwhile. This is fine
		 * as we maintain nelem as stated above. The NULL check in the
		 * for loop guards against shrinkage. Also, any callers of
		 * crypto_unnotify_events() at this point cv_wait till kn_state
		 * changes to NTFY_WAITING. Hence, nep is assured to be valid.
		 */
		(*nep->kn_func)(event, event_arg);
		mutex_enter(&nep->kn_lock);
		nep->kn_state = NTFY_WAITING;
		/* Wake any crypto_unnotify_events() waiter on this element. */
		cv_broadcast(&nep->kn_cv);
		mutex_exit(&nep->kn_lock);
		/* Reacquire the list lock before advancing the walk. */
		mutex_enter(&ntfy_list_lock);
	}
	mutex_exit(&ntfy_list_lock);
}
#if defined(_KERNEL) && defined(HAVE_SPL)
/* Export to other kernel modules (e.g. zfs) when built against the SPL. */
EXPORT_SYMBOL(crypto_mech2id);
#endif

View File

@ -0,0 +1,23 @@
---------------------------------------------------------------------------
Copyright (c) 1998-2007, Brian Gladman, Worcester, UK. All rights reserved.
LICENSE TERMS
The free distribution and use of this software is allowed (with or without
changes) provided that:
1. source code distributions include the above copyright notice, this
list of conditions and the following disclaimer;
2. binary distributions include the above copyright notice, this list
of conditions and the following disclaimer in their documentation;
3. the name of the copyright holder is not used to endorse products
built using this software without specific written permission.
DISCLAIMER
This software is provided 'as is' with no explicit or implied warranties
in respect of its properties, including, but not limited to, correctness
and/or fitness for purpose.
---------------------------------------------------------------------------

View File

@ -0,0 +1 @@
PORTIONS OF AES FUNCTIONALITY

View File

@ -0,0 +1,127 @@
LICENSE ISSUES
==============
The OpenSSL toolkit stays under a dual license, i.e. both the conditions of
the OpenSSL License and the original SSLeay license apply to the toolkit.
See below for the actual license texts. Actually both licenses are BSD-style
Open Source licenses. In case of any license issues related to OpenSSL
please contact openssl-core@openssl.org.
OpenSSL License
---------------
/* ====================================================================
* Copyright (c) 1998-2008 The OpenSSL Project. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* 3. All advertising materials mentioning features or use of this
* software must display the following acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
*
* 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
* endorse or promote products derived from this software without
* prior written permission. For written permission, please contact
* openssl-core@openssl.org.
*
* 5. Products derived from this software may not be called "OpenSSL"
* nor may "OpenSSL" appear in their names without prior written
* permission of the OpenSSL Project.
*
* 6. Redistributions of any form whatsoever must retain the following
* acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit (http://www.openssl.org/)"
*
* THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
* EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
* ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
* ====================================================================
*
* This product includes cryptographic software written by Eric Young
* (eay@cryptsoft.com). This product includes software written by Tim
* Hudson (tjh@cryptsoft.com).
*
*/
Original SSLeay License
-----------------------
/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
* All rights reserved.
*
* This package is an SSL implementation written
* by Eric Young (eay@cryptsoft.com).
* The implementation was written so as to conform with Netscapes SSL.
*
* This library is free for commercial and non-commercial use as long as
* the following conditions are aheared to. The following conditions
* apply to all code found in this distribution, be it the RC4, RSA,
* lhash, DES, etc., code; not just the SSL code. The SSL documentation
* included with this distribution is covered by the same copyright terms
* except that the holder is Tim Hudson (tjh@cryptsoft.com).
*
* Copyright remains Eric Young's, and as such any Copyright notices in
* the code are not to be removed.
* If this package is used in a product, Eric Young should be given attribution
* as the author of the parts of the library used.
* This can be in the form of a textual message at program startup or
* in documentation (online or textual) provided with the package.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* "This product includes cryptographic software written by
* Eric Young (eay@cryptsoft.com)"
* The word 'cryptographic' can be left out if the rouines from the library
* being used are not cryptographic related :-).
* 4. If you include any Windows specific code (or a derivative thereof) from
* the apps directory (application code) you must include an acknowledgement:
* "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
*
* THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* The licence and distribution terms for any publically available version or
* derivative of this code cannot be changed. i.e. this code cannot simply be
* copied and put under another distribution licence
* [including the GNU Public Licence.]
*/

View File

@ -0,0 +1 @@
PORTIONS OF AES FUNCTIONALITY

View File

@ -0,0 +1,900 @@
/*
* ---------------------------------------------------------------------------
* Copyright (c) 1998-2007, Brian Gladman, Worcester, UK. All rights reserved.
*
* LICENSE TERMS
*
* The free distribution and use of this software is allowed (with or without
* changes) provided that:
*
* 1. source code distributions include the above copyright notice, this
* list of conditions and the following disclaimer;
*
* 2. binary distributions include the above copyright notice, this list
* of conditions and the following disclaimer in their documentation;
*
* 3. the name of the copyright holder is not used to endorse products
* built using this software without specific written permission.
*
* DISCLAIMER
*
* This software is provided 'as is' with no explicit or implied warranties
* in respect of its properties, including, but not limited to, correctness
* and/or fitness for purpose.
* ---------------------------------------------------------------------------
* Issue 20/12/2007
*
* I am grateful to Dag Arne Osvik for many discussions of the techniques that
* can be used to optimise AES assembler code on AMD64/EM64T architectures.
* Some of the techniques used in this implementation are the result of
* suggestions made by him for which I am most grateful.
*
* An AES implementation for AMD64 processors using the YASM assembler. This
* implementation provides only encryption, decryption and hence requires key
* scheduling support in C. It uses 8k bytes of tables but its encryption and
* decryption performance is very close to that obtained using large tables.
* It can use either MS Windows or Gnu/Linux/OpenSolaris OS calling conventions,
* which are as follows:
* ms windows gnu/linux/opensolaris os
*
* in_blk rcx rdi
* out_blk rdx rsi
* context (cx) r8 rdx
*
* preserved rsi - + rbx, rbp, rsp, r12, r13, r14 & r15
* registers rdi - on both
*
* destroyed - rsi + rax, rcx, rdx, r8, r9, r10 & r11
* registers - rdi on both
*
* The convention used here is that for gnu/linux/opensolaris os.
*
* This code provides the standard AES block size (128 bits, 16 bytes) and the
* three standard AES key sizes (128, 192 and 256 bits). It has the same call
* interface as my C implementation. It uses the Microsoft C AMD64 calling
* conventions in which the three parameters are placed in rcx, rdx and r8
* respectively. The rbx, rsi, rdi, rbp and r12..r15 registers are preserved.
*
* OpenSolaris Note:
* Modified to use GNU/Linux/Solaris calling conventions.
* That is parameters are placed in rdi, rsi, rdx, and rcx, respectively.
*
* AES_RETURN aes_encrypt(const unsigned char in_blk[],
* unsigned char out_blk[], const aes_encrypt_ctx cx[1])/
*
* AES_RETURN aes_decrypt(const unsigned char in_blk[],
* unsigned char out_blk[], const aes_decrypt_ctx cx[1])/
*
* AES_RETURN aes_encrypt_key<NNN>(const unsigned char key[],
* const aes_encrypt_ctx cx[1])/
*
* AES_RETURN aes_decrypt_key<NNN>(const unsigned char key[],
* const aes_decrypt_ctx cx[1])/
*
* AES_RETURN aes_encrypt_key(const unsigned char key[],
* unsigned int len, const aes_decrypt_ctx cx[1])/
*
* AES_RETURN aes_decrypt_key(const unsigned char key[],
* unsigned int len, const aes_decrypt_ctx cx[1])/
*
 * where <NNN> is 128, 192 or 256. In the last two calls the length can be in
* either bits or bytes.
*
* Comment in/out the following lines to obtain the desired subroutines. These
* selections MUST match those in the C header file aesopt.h
*/
// Build-time variant selection; keep in sync with the C header aesopt.h.
#define AES_REV_DKS /* define if key decryption schedule is reversed */
#define LAST_ROUND_TABLES /* define for the faster version using extra tables */
/*
* The encryption key schedule has the following in memory layout where N is the
* number of rounds (10, 12 or 14):
*
* lo: | input key (round 0) | / each round is four 32-bit words
* | encryption round 1 |
* | encryption round 2 |
* ....
* | encryption round N-1 |
* hi: | encryption round N |
*
* The decryption key schedule is normally set up so that it has the same
* layout as above by actually reversing the order of the encryption key
* schedule in memory (this happens when AES_REV_DKS is set):
*
* lo: | decryption round 0 | = | encryption round N |
* | decryption round 1 | = INV_MIX_COL[ | encryption round N-1 | ]
* | decryption round 2 | = INV_MIX_COL[ | encryption round N-2 | ]
* .... ....
* | decryption round N-1 | = INV_MIX_COL[ | encryption round 1 | ]
* hi: | decryption round N | = | input key (round 0) |
*
* with rounds except the first and last modified using inv_mix_column()
* But if AES_REV_DKS is NOT set the order of keys is left as it is for
* encryption so that it has to be accessed in reverse when used for
* decryption (although the inverse mix column modifications are done)
*
* lo: | decryption round 0 | = | input key (round 0) |
* | decryption round 1 | = INV_MIX_COL[ | encryption round 1 | ]
* | decryption round 2 | = INV_MIX_COL[ | encryption round 2 | ]
* .... ....
* | decryption round N-1 | = INV_MIX_COL[ | encryption round N-1 | ]
* hi: | decryption round N | = | encryption round N |
*
* This layout is faster when the assembler key scheduling provided here
* is used.
*
* End of user defines
*/
/*
* ---------------------------------------------------------------------------
* OpenSolaris OS modifications
*
* This source originates from Brian Gladman file aes_amd64.asm
* in http://fp.gladman.plus.com/AES/aes-src-04-03-08.zip
* with these changes:
*
* 1. Removed MS Windows-specific code within DLL_EXPORT, _SEH_, and
* !__GNUC__ ifdefs. Also removed ENCRYPTION, DECRYPTION,
* AES_128, AES_192, AES_256, AES_VAR ifdefs.
*
* 2. Translate yasm/nasm %define and .macro definitions to cpp(1) #define
*
* 3. Translate yasm/nasm %ifdef/%ifndef to cpp(1) #ifdef
*
* 4. Translate Intel/yasm/nasm syntax to ATT/OpenSolaris as(1) syntax
* (operands reversed, literals prefixed with "$", registers prefixed with "%",
* and "[register+offset]", addressing changed to "offset(register)",
* parenthesis in constant expressions "()" changed to square brackets "[]",
* "." removed from local (numeric) labels, and other changes.
* Examples:
* Intel/yasm/nasm Syntax ATT/OpenSolaris Syntax
* mov rax,(4*20h) mov $[4*0x20],%rax
* mov rax,[ebx+20h] mov 0x20(%ebx),%rax
* lea rax,[ebx+ecx] lea (%ebx,%ecx),%rax
* sub rax,[ebx+ecx*4-20h] sub -0x20(%ebx,%ecx,4),%rax
*
* 5. Added OpenSolaris ENTRY_NP/SET_SIZE macros from
* /usr/include/sys/asm_linkage.h, lint(1B) guards, and dummy C function
* definitions for lint.
*
* 6. Renamed functions and reordered parameters to match OpenSolaris:
* Original Gladman interface:
* int aes_encrypt(const unsigned char *in,
* unsigned char *out, const aes_encrypt_ctx cx[1])/
* int aes_decrypt(const unsigned char *in,
* unsigned char *out, const aes_encrypt_ctx cx[1])/
* Note: aes_encrypt_ctx contains ks, a 60 element array of uint32_t,
* and a union type, inf., containing inf.l, a uint32_t and
* inf.b, a 4-element array of uint32_t. Only b[0] in the array (aka "l") is
* used and contains the key schedule length * 16 where key schedule length is
* 10, 12, or 14 bytes.
*
* OpenSolaris OS interface:
* void aes_encrypt_amd64(const aes_ks_t *ks, int Nr,
* const uint32_t pt[4], uint32_t ct[4])/
* void aes_decrypt_amd64(const aes_ks_t *ks, int Nr,
* const uint32_t pt[4], uint32_t ct[4])/
* typedef union {uint64_t ks64[(MAX_AES_NR + 1) * 4]/
* uint32_t ks32[(MAX_AES_NR + 1) * 4]/ } aes_ks_t/
* Note: ks is the AES key schedule, Nr is number of rounds, pt is plain text,
* ct is crypto text, and MAX_AES_NR is 14.
* For the x86 64-bit architecture, OpenSolaris OS uses ks32 instead of ks64.
*/
/*
 * Under lint, provide empty C stubs with the public signatures so the
 * checker has prototypes to analyze; the real bodies are assembler below.
 */
#if defined(lint) || defined(__lint)
#include <sys/types.h>
/* ARGSUSED */
void
aes_encrypt_amd64(const uint32_t rk[], int Nr, const uint32_t pt[4],
    uint32_t ct[4]) {
}
/* ARGSUSED */
void
aes_decrypt_amd64(const uint32_t rk[], int Nr, const uint32_t ct[4],
    uint32_t pt[4]) {
}
#else
#define _ASM
#include <sys/asm_linkage.h>
/* Key schedule length in 32-bit words; 60 covers up to AES-256 (14 rounds). */
#define KS_LENGTH 60
// 32-bit (…d) and low-byte (…b) views of the 64-bit general registers.
#define raxd eax
#define rdxd edx
#define rcxd ecx
#define rbxd ebx
#define rsid esi
#define rdid edi
#define raxb al
#define rdxb dl
#define rcxb cl
#define rbxb bl
#define rsib sil
#define rdib dil
// finite field multiplies by {02}, {04} and {08}
// (square brackets are OpenSolaris as(1) constant-expression grouping)
#define f2(x) [[x<<1]^[[[x>>7]&1]*0x11b]]
#define f4(x) [[x<<2]^[[[x>>6]&1]*0x11b]^[[[x>>6]&2]*0x11b]]
#define f8(x) [[x<<3]^[[[x>>5]&1]*0x11b]^[[[x>>5]&2]*0x11b]^[[[x>>5]&4]*0x11b]]
// finite field multiplies required in table generation
#define f3(x) [[f2(x)] ^ [x]]
#define f9(x) [[f8(x)] ^ [x]]
#define fb(x) [[f8(x)] ^ [f2(x)] ^ [x]]
#define fd(x) [[f8(x)] ^ [f4(x)] ^ [x]]
#define fe(x) [[f8(x)] ^ [f4(x)] ^ [f2(x)]]
// macros for expanding S-box data
// u8/v8 emit 8-byte table rows per S-box byte; w8 emits the plain byte rows
// used by the optional "last round" tables.
#define u8(x) [f2(x)], [x], [x], [f3(x)], [f2(x)], [x], [x], [f3(x)]
#define v8(x) [fe(x)], [f9(x)], [fd(x)], [fb(x)], [fe(x)], [f9(x)], [fd(x)], [x]
#define w8(x) [x], 0, 0, 0, [x], 0, 0, 0
// Expands the 256-entry AES forward S-box, one row of 8 values per line,
// through the expander macro x (u8 for the main table, w8 for last-round).
#define enc_vals(x) \
.byte x(0x63),x(0x7c),x(0x77),x(0x7b),x(0xf2),x(0x6b),x(0x6f),x(0xc5); \
.byte x(0x30),x(0x01),x(0x67),x(0x2b),x(0xfe),x(0xd7),x(0xab),x(0x76); \
.byte x(0xca),x(0x82),x(0xc9),x(0x7d),x(0xfa),x(0x59),x(0x47),x(0xf0); \
.byte x(0xad),x(0xd4),x(0xa2),x(0xaf),x(0x9c),x(0xa4),x(0x72),x(0xc0); \
.byte x(0xb7),x(0xfd),x(0x93),x(0x26),x(0x36),x(0x3f),x(0xf7),x(0xcc); \
.byte x(0x34),x(0xa5),x(0xe5),x(0xf1),x(0x71),x(0xd8),x(0x31),x(0x15); \
.byte x(0x04),x(0xc7),x(0x23),x(0xc3),x(0x18),x(0x96),x(0x05),x(0x9a); \
.byte x(0x07),x(0x12),x(0x80),x(0xe2),x(0xeb),x(0x27),x(0xb2),x(0x75); \
.byte x(0x09),x(0x83),x(0x2c),x(0x1a),x(0x1b),x(0x6e),x(0x5a),x(0xa0); \
.byte x(0x52),x(0x3b),x(0xd6),x(0xb3),x(0x29),x(0xe3),x(0x2f),x(0x84); \
.byte x(0x53),x(0xd1),x(0x00),x(0xed),x(0x20),x(0xfc),x(0xb1),x(0x5b); \
.byte x(0x6a),x(0xcb),x(0xbe),x(0x39),x(0x4a),x(0x4c),x(0x58),x(0xcf); \
.byte x(0xd0),x(0xef),x(0xaa),x(0xfb),x(0x43),x(0x4d),x(0x33),x(0x85); \
.byte x(0x45),x(0xf9),x(0x02),x(0x7f),x(0x50),x(0x3c),x(0x9f),x(0xa8); \
.byte x(0x51),x(0xa3),x(0x40),x(0x8f),x(0x92),x(0x9d),x(0x38),x(0xf5); \
.byte x(0xbc),x(0xb6),x(0xda),x(0x21),x(0x10),x(0xff),x(0xf3),x(0xd2); \
.byte x(0xcd),x(0x0c),x(0x13),x(0xec),x(0x5f),x(0x97),x(0x44),x(0x17); \
.byte x(0xc4),x(0xa7),x(0x7e),x(0x3d),x(0x64),x(0x5d),x(0x19),x(0x73); \
.byte x(0x60),x(0x81),x(0x4f),x(0xdc),x(0x22),x(0x2a),x(0x90),x(0x88); \
.byte x(0x46),x(0xee),x(0xb8),x(0x14),x(0xde),x(0x5e),x(0x0b),x(0xdb); \
.byte x(0xe0),x(0x32),x(0x3a),x(0x0a),x(0x49),x(0x06),x(0x24),x(0x5c); \
.byte x(0xc2),x(0xd3),x(0xac),x(0x62),x(0x91),x(0x95),x(0xe4),x(0x79); \
.byte x(0xe7),x(0xc8),x(0x37),x(0x6d),x(0x8d),x(0xd5),x(0x4e),x(0xa9); \
.byte x(0x6c),x(0x56),x(0xf4),x(0xea),x(0x65),x(0x7a),x(0xae),x(0x08); \
.byte x(0xba),x(0x78),x(0x25),x(0x2e),x(0x1c),x(0xa6),x(0xb4),x(0xc6); \
.byte x(0xe8),x(0xdd),x(0x74),x(0x1f),x(0x4b),x(0xbd),x(0x8b),x(0x8a); \
.byte x(0x70),x(0x3e),x(0xb5),x(0x66),x(0x48),x(0x03),x(0xf6),x(0x0e); \
.byte x(0x61),x(0x35),x(0x57),x(0xb9),x(0x86),x(0xc1),x(0x1d),x(0x9e); \
.byte x(0xe1),x(0xf8),x(0x98),x(0x11),x(0x69),x(0xd9),x(0x8e),x(0x94); \
.byte x(0x9b),x(0x1e),x(0x87),x(0xe9),x(0xce),x(0x55),x(0x28),x(0xdf); \
.byte x(0x8c),x(0xa1),x(0x89),x(0x0d),x(0xbf),x(0xe6),x(0x42),x(0x68); \
.byte x(0x41),x(0x99),x(0x2d),x(0x0f),x(0xb0),x(0x54),x(0xbb),x(0x16)
// Expands the 256-entry AES inverse S-box, one row of 8 values per line,
// through the expander macro x (v8 for the main table, w8 for last-round).
#define dec_vals(x) \
.byte x(0x52),x(0x09),x(0x6a),x(0xd5),x(0x30),x(0x36),x(0xa5),x(0x38); \
.byte x(0xbf),x(0x40),x(0xa3),x(0x9e),x(0x81),x(0xf3),x(0xd7),x(0xfb); \
.byte x(0x7c),x(0xe3),x(0x39),x(0x82),x(0x9b),x(0x2f),x(0xff),x(0x87); \
.byte x(0x34),x(0x8e),x(0x43),x(0x44),x(0xc4),x(0xde),x(0xe9),x(0xcb); \
.byte x(0x54),x(0x7b),x(0x94),x(0x32),x(0xa6),x(0xc2),x(0x23),x(0x3d); \
.byte x(0xee),x(0x4c),x(0x95),x(0x0b),x(0x42),x(0xfa),x(0xc3),x(0x4e); \
.byte x(0x08),x(0x2e),x(0xa1),x(0x66),x(0x28),x(0xd9),x(0x24),x(0xb2); \
.byte x(0x76),x(0x5b),x(0xa2),x(0x49),x(0x6d),x(0x8b),x(0xd1),x(0x25); \
.byte x(0x72),x(0xf8),x(0xf6),x(0x64),x(0x86),x(0x68),x(0x98),x(0x16); \
.byte x(0xd4),x(0xa4),x(0x5c),x(0xcc),x(0x5d),x(0x65),x(0xb6),x(0x92); \
.byte x(0x6c),x(0x70),x(0x48),x(0x50),x(0xfd),x(0xed),x(0xb9),x(0xda); \
.byte x(0x5e),x(0x15),x(0x46),x(0x57),x(0xa7),x(0x8d),x(0x9d),x(0x84); \
.byte x(0x90),x(0xd8),x(0xab),x(0x00),x(0x8c),x(0xbc),x(0xd3),x(0x0a); \
.byte x(0xf7),x(0xe4),x(0x58),x(0x05),x(0xb8),x(0xb3),x(0x45),x(0x06); \
.byte x(0xd0),x(0x2c),x(0x1e),x(0x8f),x(0xca),x(0x3f),x(0x0f),x(0x02); \
.byte x(0xc1),x(0xaf),x(0xbd),x(0x03),x(0x01),x(0x13),x(0x8a),x(0x6b); \
.byte x(0x3a),x(0x91),x(0x11),x(0x41),x(0x4f),x(0x67),x(0xdc),x(0xea); \
.byte x(0x97),x(0xf2),x(0xcf),x(0xce),x(0xf0),x(0xb4),x(0xe6),x(0x73); \
.byte x(0x96),x(0xac),x(0x74),x(0x22),x(0xe7),x(0xad),x(0x35),x(0x85); \
.byte x(0xe2),x(0xf9),x(0x37),x(0xe8),x(0x1c),x(0x75),x(0xdf),x(0x6e); \
.byte x(0x47),x(0xf1),x(0x1a),x(0x71),x(0x1d),x(0x29),x(0xc5),x(0x89); \
.byte x(0x6f),x(0xb7),x(0x62),x(0x0e),x(0xaa),x(0x18),x(0xbe),x(0x1b); \
.byte x(0xfc),x(0x56),x(0x3e),x(0x4b),x(0xc6),x(0xd2),x(0x79),x(0x20); \
.byte x(0x9a),x(0xdb),x(0xc0),x(0xfe),x(0x78),x(0xcd),x(0x5a),x(0xf4); \
.byte x(0x1f),x(0xdd),x(0xa8),x(0x33),x(0x88),x(0x07),x(0xc7),x(0x31); \
.byte x(0xb1),x(0x12),x(0x10),x(0x59),x(0x27),x(0x80),x(0xec),x(0x5f); \
.byte x(0x60),x(0x51),x(0x7f),x(0xa9),x(0x19),x(0xb5),x(0x4a),x(0x0d); \
.byte x(0x2d),x(0xe5),x(0x7a),x(0x9f),x(0x93),x(0xc9),x(0x9c),x(0xef); \
.byte x(0xa0),x(0xe0),x(0x3b),x(0x4d),x(0xae),x(0x2a),x(0xf5),x(0xb0); \
.byte x(0xc8),x(0xeb),x(0xbb),x(0x3c),x(0x83),x(0x53),x(0x99),x(0x61); \
.byte x(0x17),x(0x2b),x(0x04),x(0x7e),x(0xba),x(0x77),x(0xd6),x(0x26); \
.byte x(0xe1),x(0x69),x(0x14),x(0x63),x(0x55),x(0x21),x(0x0c),x(0x7d)
#define tptr %rbp /* table pointer */
#define kptr %r8 /* key schedule pointer */
#define fofs 128 /* adjust offset in key schedule to keep |disp| < 128 */
#define fk_ref(x, y) -16*x+fofs+4*y(kptr)
#ifdef AES_REV_DKS
#define rofs 128
#define ik_ref(x, y) -16*x+rofs+4*y(kptr)
#else
#define rofs -128
#define ik_ref(x, y) 16*x+rofs+4*y(kptr)
#endif /* AES_REV_DKS */
// tab_0..tab_3 select progressively rotated byte lanes of the 8-byte table
// rows; tab_f/tab_i index the plain S-box byte for the last round.
#define tab_0(x) (tptr,x,8)
#define tab_1(x) 3(tptr,x,8)
#define tab_2(x) 2(tptr,x,8)
#define tab_3(x) 1(tptr,x,8)
#define tab_f(x) 1(tptr,x,8)
#define tab_i(x) 7(tptr,x,8)
// One full forward round: state in eax..edx, round key from fk_ref, table
// lookups per byte lane; the new state in p1..p4 is copied back at the end.
#define ff_rnd(p1, p2, p3, p4, round) /* normal forward round */ \
mov fk_ref(round,0), p1; \
mov fk_ref(round,1), p2; \
mov fk_ref(round,2), p3; \
mov fk_ref(round,3), p4; \
\
movzx %al, %esi; \
movzx %ah, %edi; \
shr $16, %eax; \
xor tab_0(%rsi), p1; \
xor tab_1(%rdi), p4; \
movzx %al, %esi; \
movzx %ah, %edi; \
xor tab_2(%rsi), p3; \
xor tab_3(%rdi), p2; \
\
movzx %bl, %esi; \
movzx %bh, %edi; \
shr $16, %ebx; \
xor tab_0(%rsi), p2; \
xor tab_1(%rdi), p1; \
movzx %bl, %esi; \
movzx %bh, %edi; \
xor tab_2(%rsi), p4; \
xor tab_3(%rdi), p3; \
\
movzx %cl, %esi; \
movzx %ch, %edi; \
shr $16, %ecx; \
xor tab_0(%rsi), p3; \
xor tab_1(%rdi), p2; \
movzx %cl, %esi; \
movzx %ch, %edi; \
xor tab_2(%rsi), p1; \
xor tab_3(%rdi), p4; \
\
movzx %dl, %esi; \
movzx %dh, %edi; \
shr $16, %edx; \
xor tab_0(%rsi), p4; \
xor tab_1(%rdi), p3; \
movzx %dl, %esi; \
movzx %dh, %edi; \
xor tab_2(%rsi), p2; \
xor tab_3(%rdi), p1; \
\
mov p1, %eax; \
mov p2, %ebx; \
mov p3, %ecx; \
mov p4, %edx
#ifdef LAST_ROUND_TABLES
// Last forward round using the extra 2KB "last round" table appended to
// enc_tab (hence the tptr advance); result stays in p1..p4.
#define fl_rnd(p1, p2, p3, p4, round) /* last forward round */ \
add $2048, tptr; \
mov fk_ref(round,0), p1; \
mov fk_ref(round,1), p2; \
mov fk_ref(round,2), p3; \
mov fk_ref(round,3), p4; \
\
movzx %al, %esi; \
movzx %ah, %edi; \
shr $16, %eax; \
xor tab_0(%rsi), p1; \
xor tab_1(%rdi), p4; \
movzx %al, %esi; \
movzx %ah, %edi; \
xor tab_2(%rsi), p3; \
xor tab_3(%rdi), p2; \
\
movzx %bl, %esi; \
movzx %bh, %edi; \
shr $16, %ebx; \
xor tab_0(%rsi), p2; \
xor tab_1(%rdi), p1; \
movzx %bl, %esi; \
movzx %bh, %edi; \
xor tab_2(%rsi), p4; \
xor tab_3(%rdi), p3; \
\
movzx %cl, %esi; \
movzx %ch, %edi; \
shr $16, %ecx; \
xor tab_0(%rsi), p3; \
xor tab_1(%rdi), p2; \
movzx %cl, %esi; \
movzx %ch, %edi; \
xor tab_2(%rsi), p1; \
xor tab_3(%rdi), p4; \
\
movzx %dl, %esi; \
movzx %dh, %edi; \
shr $16, %edx; \
xor tab_0(%rsi), p4; \
xor tab_1(%rdi), p3; \
movzx %dl, %esi; \
movzx %dh, %edi; \
xor tab_2(%rsi), p2; \
xor tab_3(%rdi), p1
#else
// Last forward round without extra tables: substitute each byte via tab_f
// and rotate it into its output lane position.
#define fl_rnd(p1, p2, p3, p4, round) /* last forward round */ \
mov fk_ref(round,0), p1; \
mov fk_ref(round,1), p2; \
mov fk_ref(round,2), p3; \
mov fk_ref(round,3), p4; \
\
movzx %al, %esi; \
movzx %ah, %edi; \
shr $16, %eax; \
movzx tab_f(%rsi), %esi; \
movzx tab_f(%rdi), %edi; \
xor %esi, p1; \
rol $8, %edi; \
xor %edi, p4; \
movzx %al, %esi; \
movzx %ah, %edi; \
movzx tab_f(%rsi), %esi; \
movzx tab_f(%rdi), %edi; \
rol $16, %esi; \
rol $24, %edi; \
xor %esi, p3; \
xor %edi, p2; \
\
movzx %bl, %esi; \
movzx %bh, %edi; \
shr $16, %ebx; \
movzx tab_f(%rsi), %esi; \
movzx tab_f(%rdi), %edi; \
xor %esi, p2; \
rol $8, %edi; \
xor %edi, p1; \
movzx %bl, %esi; \
movzx %bh, %edi; \
movzx tab_f(%rsi), %esi; \
movzx tab_f(%rdi), %edi; \
rol $16, %esi; \
rol $24, %edi; \
xor %esi, p4; \
xor %edi, p3; \
\
movzx %cl, %esi; \
movzx %ch, %edi; \
movzx tab_f(%rsi), %esi; \
movzx tab_f(%rdi), %edi; \
shr $16, %ecx; \
xor %esi, p3; \
rol $8, %edi; \
xor %edi, p2; \
movzx %cl, %esi; \
movzx %ch, %edi; \
movzx tab_f(%rsi), %esi; \
movzx tab_f(%rdi), %edi; \
rol $16, %esi; \
rol $24, %edi; \
xor %esi, p1; \
xor %edi, p4; \
\
movzx %dl, %esi; \
movzx %dh, %edi; \
movzx tab_f(%rsi), %esi; \
movzx tab_f(%rdi), %edi; \
shr $16, %edx; \
xor %esi, p4; \
rol $8, %edi; \
xor %edi, p3; \
movzx %dl, %esi; \
movzx %dh, %edi; \
movzx tab_f(%rsi), %esi; \
movzx tab_f(%rdi), %edi; \
rol $16, %esi; \
rol $24, %edi; \
xor %esi, p2; \
xor %edi, p1
#endif /* LAST_ROUND_TABLES */
// One full inverse round: same structure as ff_rnd, but the byte lanes are
// distributed in the inverse (InvShiftRows) pattern and keys come via ik_ref.
#define ii_rnd(p1, p2, p3, p4, round) /* normal inverse round */ \
mov ik_ref(round,0), p1; \
mov ik_ref(round,1), p2; \
mov ik_ref(round,2), p3; \
mov ik_ref(round,3), p4; \
\
movzx %al, %esi; \
movzx %ah, %edi; \
shr $16, %eax; \
xor tab_0(%rsi), p1; \
xor tab_1(%rdi), p2; \
movzx %al, %esi; \
movzx %ah, %edi; \
xor tab_2(%rsi), p3; \
xor tab_3(%rdi), p4; \
\
movzx %bl, %esi; \
movzx %bh, %edi; \
shr $16, %ebx; \
xor tab_0(%rsi), p2; \
xor tab_1(%rdi), p3; \
movzx %bl, %esi; \
movzx %bh, %edi; \
xor tab_2(%rsi), p4; \
xor tab_3(%rdi), p1; \
\
movzx %cl, %esi; \
movzx %ch, %edi; \
shr $16, %ecx; \
xor tab_0(%rsi), p3; \
xor tab_1(%rdi), p4; \
movzx %cl, %esi; \
movzx %ch, %edi; \
xor tab_2(%rsi), p1; \
xor tab_3(%rdi), p2; \
\
movzx %dl, %esi; \
movzx %dh, %edi; \
shr $16, %edx; \
xor tab_0(%rsi), p4; \
xor tab_1(%rdi), p1; \
movzx %dl, %esi; \
movzx %dh, %edi; \
xor tab_2(%rsi), p2; \
xor tab_3(%rdi), p3; \
\
mov p1, %eax; \
mov p2, %ebx; \
mov p3, %ecx; \
mov p4, %edx
#ifdef LAST_ROUND_TABLES
// Last inverse round using the extra 2KB table appended to dec_tab
// (hence the tptr advance); result stays in p1..p4.
#define il_rnd(p1, p2, p3, p4, round) /* last inverse round */ \
add $2048, tptr; \
mov ik_ref(round,0), p1; \
mov ik_ref(round,1), p2; \
mov ik_ref(round,2), p3; \
mov ik_ref(round,3), p4; \
\
movzx %al, %esi; \
movzx %ah, %edi; \
shr $16, %eax; \
xor tab_0(%rsi), p1; \
xor tab_1(%rdi), p2; \
movzx %al, %esi; \
movzx %ah, %edi; \
xor tab_2(%rsi), p3; \
xor tab_3(%rdi), p4; \
\
movzx %bl, %esi; \
movzx %bh, %edi; \
shr $16, %ebx; \
xor tab_0(%rsi), p2; \
xor tab_1(%rdi), p3; \
movzx %bl, %esi; \
movzx %bh, %edi; \
xor tab_2(%rsi), p4; \
xor tab_3(%rdi), p1; \
\
movzx %cl, %esi; \
movzx %ch, %edi; \
shr $16, %ecx; \
xor tab_0(%rsi), p3; \
xor tab_1(%rdi), p4; \
movzx %cl, %esi; \
movzx %ch, %edi; \
xor tab_2(%rsi), p1; \
xor tab_3(%rdi), p2; \
\
movzx %dl, %esi; \
movzx %dh, %edi; \
shr $16, %edx; \
xor tab_0(%rsi), p4; \
xor tab_1(%rdi), p1; \
movzx %dl, %esi; \
movzx %dh, %edi; \
xor tab_2(%rsi), p2; \
xor tab_3(%rdi), p3
#else
// Last inverse round without extra tables: substitute each byte via tab_i
// and rotate it into its output lane position.
#define il_rnd(p1, p2, p3, p4, round) /* last inverse round */ \
mov ik_ref(round,0), p1; \
mov ik_ref(round,1), p2; \
mov ik_ref(round,2), p3; \
mov ik_ref(round,3), p4; \
\
movzx %al, %esi; \
movzx %ah, %edi; \
movzx tab_i(%rsi), %esi; \
movzx tab_i(%rdi), %edi; \
shr $16, %eax; \
xor %esi, p1; \
rol $8, %edi; \
xor %edi, p2; \
movzx %al, %esi; \
movzx %ah, %edi; \
movzx tab_i(%rsi), %esi; \
movzx tab_i(%rdi), %edi; \
rol $16, %esi; \
rol $24, %edi; \
xor %esi, p3; \
xor %edi, p4; \
\
movzx %bl, %esi; \
movzx %bh, %edi; \
movzx tab_i(%rsi), %esi; \
movzx tab_i(%rdi), %edi; \
shr $16, %ebx; \
xor %esi, p2; \
rol $8, %edi; \
xor %edi, p3; \
movzx %bl, %esi; \
movzx %bh, %edi; \
movzx tab_i(%rsi), %esi; \
movzx tab_i(%rdi), %edi; \
rol $16, %esi; \
rol $24, %edi; \
xor %esi, p4; \
xor %edi, p1; \
\
movzx %cl, %esi; \
movzx %ch, %edi; \
movzx tab_i(%rsi), %esi; \
movzx tab_i(%rdi), %edi; \
shr $16, %ecx; \
xor %esi, p3; \
rol $8, %edi; \
xor %edi, p4; \
movzx %cl, %esi; \
movzx %ch, %edi; \
movzx tab_i(%rsi), %esi; \
movzx tab_i(%rdi), %edi; \
rol $16, %esi; \
rol $24, %edi; \
xor %esi, p1; \
xor %edi, p2; \
\
movzx %dl, %esi; \
movzx %dh, %edi; \
movzx tab_i(%rsi), %esi; \
movzx tab_i(%rdi), %edi; \
shr $16, %edx; \
xor %esi, p4; \
rol $8, %edi; \
xor %edi, p1; \
movzx %dl, %esi; \
movzx %dh, %edi; \
movzx tab_i(%rsi), %esi; \
movzx tab_i(%rdi), %edi; \
rol $16, %esi; \
rol $24, %edi; \
xor %esi, p2; \
xor %edi, p3
#endif /* LAST_ROUND_TABLES */
/*
 * OpenSolaris OS:
 * void aes_encrypt_amd64(const aes_ks_t *ks, int Nr,
 * const uint32_t pt[4], uint32_t ct[4])/
 *
 * Original interface:
 * int aes_encrypt(const unsigned char *in,
 * unsigned char *out, const aes_encrypt_ctx cx[1])/
 */
.align 64
enc_tab:
enc_vals(u8)
#ifdef LAST_ROUND_TABLES
// Last Round Tables:
enc_vals(w8)
#endif
ENTRY_NP(aes_encrypt_amd64)
#ifdef GLADMAN_INTERFACE
// Original interface
sub $[4*8], %rsp // gnu/linux/opensolaris binary interface
mov %rsi, (%rsp) // output pointer (P2)
mov %rdx, %r8 // context (P3)
mov %rbx, 1*8(%rsp) // P1: input pointer in rdi
mov %rbp, 2*8(%rsp) // P2: output pointer in (rsp)
mov %r12, 3*8(%rsp) // P3: context in r8
movzx 4*KS_LENGTH(kptr), %esi // Get byte key length * 16
#else
// OpenSolaris OS interface
sub $[4*8], %rsp // Make room on stack to save registers
mov %rcx, (%rsp) // Save output pointer (P4) on stack
mov %rdi, %r8 // context (P1)
mov %rdx, %rdi // P3: save input pointer
shl $4, %esi // P2: esi byte key length * 16
mov %rbx, 1*8(%rsp) // Save registers
mov %rbp, 2*8(%rsp)
mov %r12, 3*8(%rsp)
// P1: context in r8
// P2: byte key length * 16 in esi
// P3: input pointer in rdi
// P4: output pointer in (rsp)
#endif /* GLADMAN_INTERFACE */
lea enc_tab(%rip), tptr
sub $fofs, kptr
// Load input block into registers
mov (%rdi), %eax
mov 1*4(%rdi), %ebx
mov 2*4(%rdi), %ecx
mov 3*4(%rdi), %edx
// Initial AddRoundKey with round-0 key words
xor fofs(kptr), %eax
xor fofs+4(kptr), %ebx
xor fofs+8(kptr), %ecx
xor fofs+12(kptr), %edx
lea (kptr,%rsi), kptr // kptr now addresses the final round key
// Jump based on byte key length * 16:
cmp $[10*16], %esi
je 3f
cmp $[12*16], %esi
je 2f
cmp $[14*16], %esi
je 1f
mov $-1, %rax // error
jmp 4f
// Perform normal forward rounds
// (AES-256 enters at 1, AES-192 at 2, AES-128 at 3; each falls through)
1: ff_rnd(%r9d, %r10d, %r11d, %r12d, 13)
ff_rnd(%r9d, %r10d, %r11d, %r12d, 12)
2: ff_rnd(%r9d, %r10d, %r11d, %r12d, 11)
ff_rnd(%r9d, %r10d, %r11d, %r12d, 10)
3: ff_rnd(%r9d, %r10d, %r11d, %r12d, 9)
ff_rnd(%r9d, %r10d, %r11d, %r12d, 8)
ff_rnd(%r9d, %r10d, %r11d, %r12d, 7)
ff_rnd(%r9d, %r10d, %r11d, %r12d, 6)
ff_rnd(%r9d, %r10d, %r11d, %r12d, 5)
ff_rnd(%r9d, %r10d, %r11d, %r12d, 4)
ff_rnd(%r9d, %r10d, %r11d, %r12d, 3)
ff_rnd(%r9d, %r10d, %r11d, %r12d, 2)
ff_rnd(%r9d, %r10d, %r11d, %r12d, 1)
fl_rnd(%r9d, %r10d, %r11d, %r12d, 0)
// Copy results
mov (%rsp), %rbx
mov %r9d, (%rbx)
mov %r10d, 4(%rbx)
mov %r11d, 8(%rbx)
mov %r12d, 12(%rbx)
xor %rax, %rax
4: // Restore registers
mov 1*8(%rsp), %rbx
mov 2*8(%rsp), %rbp
mov 3*8(%rsp), %r12
add $[4*8], %rsp
ret
SET_SIZE(aes_encrypt_amd64)
/*
 * OpenSolaris OS:
 * void aes_decrypt_amd64(const aes_ks_t *ks, int Nr,
 * const uint32_t pt[4], uint32_t ct[4])/
 *
 * Original interface:
 * int aes_decrypt(const unsigned char *in,
 * unsigned char *out, const aes_encrypt_ctx cx[1])/
 */
.align 64
dec_tab:
dec_vals(v8)
#ifdef LAST_ROUND_TABLES
// Last Round Tables:
dec_vals(w8)
#endif
ENTRY_NP(aes_decrypt_amd64)
#ifdef GLADMAN_INTERFACE
// Original interface
sub $[4*8], %rsp // gnu/linux/opensolaris binary interface
mov %rsi, (%rsp) // output pointer (P2)
mov %rdx, %r8 // context (P3)
mov %rbx, 1*8(%rsp) // P1: input pointer in rdi
mov %rbp, 2*8(%rsp) // P2: output pointer in (rsp)
mov %r12, 3*8(%rsp) // P3: context in r8
movzx 4*KS_LENGTH(kptr), %esi // Get byte key length * 16
#else
// OpenSolaris OS interface
sub $[4*8], %rsp // Make room on stack to save registers
mov %rcx, (%rsp) // Save output pointer (P4) on stack
mov %rdi, %r8 // context (P1)
mov %rdx, %rdi // P3: save input pointer
shl $4, %esi // P2: esi byte key length * 16
mov %rbx, 1*8(%rsp) // Save registers
mov %rbp, 2*8(%rsp)
mov %r12, 3*8(%rsp)
// P1: context in r8
// P2: byte key length * 16 in esi
// P3: input pointer in rdi
// P4: output pointer in (rsp)
#endif /* GLADMAN_INTERFACE */
lea dec_tab(%rip), tptr
sub $rofs, kptr
// Load input block into registers
mov (%rdi), %eax
mov 1*4(%rdi), %ebx
mov 2*4(%rdi), %ecx
mov 3*4(%rdi), %edx
// Pick the initial-round key location: reversed schedules start at the
// base, non-reversed schedules start at the far end.
#ifdef AES_REV_DKS
mov kptr, %rdi
lea (kptr,%rsi), kptr
#else
lea (kptr,%rsi), %rdi
#endif
xor rofs(%rdi), %eax
xor rofs+4(%rdi), %ebx
xor rofs+8(%rdi), %ecx
xor rofs+12(%rdi), %edx
// Jump based on byte key length * 16:
cmp $[10*16], %esi
je 3f
cmp $[12*16], %esi
je 2f
cmp $[14*16], %esi
je 1f
mov $-1, %rax // error
jmp 4f
// Perform normal inverse rounds
// (AES-256 enters at 1, AES-192 at 2, AES-128 at 3; each falls through)
1: ii_rnd(%r9d, %r10d, %r11d, %r12d, 13)
ii_rnd(%r9d, %r10d, %r11d, %r12d, 12)
2: ii_rnd(%r9d, %r10d, %r11d, %r12d, 11)
ii_rnd(%r9d, %r10d, %r11d, %r12d, 10)
3: ii_rnd(%r9d, %r10d, %r11d, %r12d, 9)
ii_rnd(%r9d, %r10d, %r11d, %r12d, 8)
ii_rnd(%r9d, %r10d, %r11d, %r12d, 7)
ii_rnd(%r9d, %r10d, %r11d, %r12d, 6)
ii_rnd(%r9d, %r10d, %r11d, %r12d, 5)
ii_rnd(%r9d, %r10d, %r11d, %r12d, 4)
ii_rnd(%r9d, %r10d, %r11d, %r12d, 3)
ii_rnd(%r9d, %r10d, %r11d, %r12d, 2)
ii_rnd(%r9d, %r10d, %r11d, %r12d, 1)
il_rnd(%r9d, %r10d, %r11d, %r12d, 0)
// Copy results
mov (%rsp), %rbx
mov %r9d, (%rbx)
mov %r10d, 4(%rbx)
mov %r11d, 8(%rbx)
mov %r12d, 12(%rbx)
xor %rax, %rax
4: // Restore registers
mov 1*8(%rsp), %rbx
mov 2*8(%rsp), %rbp
mov 3*8(%rsp), %r12
add $[4*8], %rsp
ret
SET_SIZE(aes_decrypt_amd64)
#endif /* lint || __lint */

View File

@ -0,0 +1,851 @@
/*
* ====================================================================
* Written by Intel Corporation for the OpenSSL project to add support
* for Intel AES-NI instructions. Rights for redistribution and usage
* in source and binary forms are granted according to the OpenSSL
* license.
*
* Author: Huang Ying <ying.huang at intel dot com>
* Vinodh Gopal <vinodh.gopal at intel dot com>
* Kahraman Akdemir
*
* Intel AES-NI is a new set of Single Instruction Multiple Data (SIMD)
* instructions that are going to be introduced in the next generation
* of Intel processor, as of 2009. These instructions enable fast and
* secure data encryption and decryption, using the Advanced Encryption
* Standard (AES), defined by FIPS Publication number 197. The
* architecture introduces six instructions that offer full hardware
* support for AES. Four of them support high performance data
* encryption and decryption, and the other two instructions support
* the AES key expansion procedure.
* ====================================================================
*/
/*
* ====================================================================
* Copyright (c) 1998-2008 The OpenSSL Project. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* 3. All advertising materials mentioning features or use of this
* software must display the following acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
*
* 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
* endorse or promote products derived from this software without
* prior written permission. For written permission, please contact
* openssl-core@openssl.org.
*
* 5. Products derived from this software may not be called "OpenSSL"
* nor may "OpenSSL" appear in their names without prior written
* permission of the OpenSSL Project.
*
* 6. Redistributions of any form whatsoever must retain the following
* acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit (http://www.openssl.org/)"
*
* THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
* EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
* ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
* ====================================================================
*/
/*
* ====================================================================
* OpenSolaris OS modifications
*
* This source originates as files aes-intel.S and eng_aesni_asm.pl, in
* patches sent Dec. 9, 2008 and Dec. 24, 2008, respectively, by
* Huang Ying of Intel to the openssl-dev mailing list under the subject
* of "Add support to Intel AES-NI instruction set for x86_64 platform".
*
* This OpenSolaris version has these major changes from the original source:
*
* 1. Added OpenSolaris ENTRY_NP/SET_SIZE macros from
* /usr/include/sys/asm_linkage.h, lint(1B) guards, and dummy C function
* definitions for lint.
*
* 2. Formatted code, added comments, and added #includes and #defines.
*
* 3. If bit CR0.TS is set, clear and set the TS bit, after and before
* calling kpreempt_disable() and kpreempt_enable().
* If the TS bit is not set, Save and restore %xmm registers at the beginning
* and end of function calls (%xmm* registers are not saved and restored by
* during kernel thread preemption).
*
* 4. Renamed functions, reordered parameters, and changed return value
* to match OpenSolaris:
*
* OpenSSL interface:
* int intel_AES_set_encrypt_key(const unsigned char *userKey,
* const int bits, AES_KEY *key);
* int intel_AES_set_decrypt_key(const unsigned char *userKey,
* const int bits, AES_KEY *key);
* Return values for above are non-zero on error, 0 on success.
*
* void intel_AES_encrypt(const unsigned char *in, unsigned char *out,
* const AES_KEY *key);
* void intel_AES_decrypt(const unsigned char *in, unsigned char *out,
* const AES_KEY *key);
* typedef struct aes_key_st {
* unsigned int rd_key[4 *(AES_MAXNR + 1)];
* int rounds;
* unsigned int pad[3];
* } AES_KEY;
* Note: AES_LONG is undefined (that is, Intel uses 32-bit key schedules
* (ks32) instead of 64-bit (ks64).
* Number of rounds (aka round count) is at offset 240 of AES_KEY.
*
* OpenSolaris OS interface (#ifdefs removed for readability):
* int rijndael_key_setup_dec_intel(uint32_t rk[],
* const uint32_t cipherKey[], uint64_t keyBits);
* int rijndael_key_setup_enc_intel(uint32_t rk[],
* const uint32_t cipherKey[], uint64_t keyBits);
* Return values for above are 0 on error, number of rounds on success.
*
* void aes_encrypt_intel(const aes_ks_t *ks, int Nr,
* const uint32_t pt[4], uint32_t ct[4]);
* void aes_decrypt_intel(const aes_ks_t *ks, int Nr,
* const uint32_t pt[4], uint32_t ct[4]);
* typedef union {uint64_t ks64[(MAX_AES_NR + 1) * 4];
* uint32_t ks32[(MAX_AES_NR + 1) * 4]; } aes_ks_t;
*
* typedef union {
* uint32_t ks32[((MAX_AES_NR) + 1) * (MAX_AES_NB)];
* } aes_ks_t;
* typedef struct aes_key {
* aes_ks_t encr_ks, decr_ks;
* long double align128;
* int flags, nr, type;
* } aes_key_t;
*
* Note: ks is the AES key schedule, Nr is number of rounds, pt is plain text,
* ct is crypto text, and MAX_AES_NR is 14.
* For the x86 64-bit architecture, OpenSolaris OS uses ks32 instead of ks64.
*
* Note2: aes_ks_t must be aligned on a 0 mod 128 byte boundary.
*
* ====================================================================
*/
#if defined(lint) || defined(__lint)
#include <sys/types.h>
/*
 * Dummy C definitions so lint(1B) can type-check callers of the
 * assembly routines below; they are never compiled into the module.
 */
/* ARGSUSED */
void
aes_encrypt_intel(const uint32_t rk[], int Nr, const uint32_t pt[4],
uint32_t ct[4]) {
}
/* ARGSUSED */
void
aes_decrypt_intel(const uint32_t rk[], int Nr, const uint32_t ct[4],
uint32_t pt[4]) {
}
/* ARGSUSED */
int
rijndael_key_setup_enc_intel(uint32_t rk[], const uint32_t cipherKey[],
uint64_t keyBits) {
return (0);
}
/* ARGSUSED */
int
rijndael_key_setup_dec_intel(uint32_t rk[], const uint32_t cipherKey[],
uint64_t keyBits) {
return (0);
}
#else /* lint */
#define _ASM
#include <sys/asm_linkage.h>
#ifdef _KERNEL
/*
 * Note: the CLTS macro clobbers P2 (%rsi) under i86xpv. That is,
 * it calls HYPERVISOR_fpu_taskswitch() which modifies %rsi when it
 * uses it to pass P2 to syscall.
 * This also occurs with the STTS macro, but we don't care if
 * P2 (%rsi) is modified just before function exit.
 * The CLTS and STTS macros push and pop P1 (%rdi) already.
 */
#ifdef __xpv
#define PROTECTED_CLTS \
push %rsi; \
CLTS; \
pop %rsi
#else
#define PROTECTED_CLTS \
CLTS
#endif /* __xpv */
/*
 * If CR0.TS is clear, align the stack and save %xmm0/%xmm1 there
 * (they are not preserved across kernel thread preemption);
 * otherwise just clear TS so the FPU can be used.
 */
#define CLEAR_TS_OR_PUSH_XMM0_XMM1(tmpreg) \
push %rbp; \
mov %rsp, %rbp; \
movq %cr0, tmpreg; \
testq $CR0_TS, tmpreg; \
jnz 1f; \
and $-XMM_ALIGN, %rsp; \
sub $[XMM_SIZE * 2], %rsp; \
movaps %xmm0, 16(%rsp); \
movaps %xmm1, (%rsp); \
jmp 2f; \
1: \
PROTECTED_CLTS; \
2:
/*
 * If CR0_TS was not set above, pop %xmm0 and %xmm1 off stack,
 * otherwise set CR0_TS.
 */
#define SET_TS_OR_POP_XMM0_XMM1(tmpreg) \
testq $CR0_TS, tmpreg; \
jnz 1f; \
movaps (%rsp), %xmm1; \
movaps 16(%rsp), %xmm0; \
jmp 2f; \
1: \
STTS(tmpreg); \
2: \
mov %rbp, %rsp; \
pop %rbp
/*
 * If CR0_TS is not set, align stack (with push %rbp) and push
 * %xmm0 - %xmm6 on stack, otherwise clear CR0_TS
 */
#define CLEAR_TS_OR_PUSH_XMM0_TO_XMM6(tmpreg) \
push %rbp; \
mov %rsp, %rbp; \
movq %cr0, tmpreg; \
testq $CR0_TS, tmpreg; \
jnz 1f; \
and $-XMM_ALIGN, %rsp; \
sub $[XMM_SIZE * 7], %rsp; \
movaps %xmm0, 96(%rsp); \
movaps %xmm1, 80(%rsp); \
movaps %xmm2, 64(%rsp); \
movaps %xmm3, 48(%rsp); \
movaps %xmm4, 32(%rsp); \
movaps %xmm5, 16(%rsp); \
movaps %xmm6, (%rsp); \
jmp 2f; \
1: \
PROTECTED_CLTS; \
2:
/*
 * If CR0_TS was not set above, pop %xmm0 - %xmm6 off stack,
 * otherwise set CR0_TS.
 */
#define SET_TS_OR_POP_XMM0_TO_XMM6(tmpreg) \
testq $CR0_TS, tmpreg; \
jnz 1f; \
movaps (%rsp), %xmm6; \
movaps 16(%rsp), %xmm5; \
movaps 32(%rsp), %xmm4; \
movaps 48(%rsp), %xmm3; \
movaps 64(%rsp), %xmm2; \
movaps 80(%rsp), %xmm1; \
movaps 96(%rsp), %xmm0; \
jmp 2f; \
1: \
STTS(tmpreg); \
2: \
mov %rbp, %rsp; \
pop %rbp
#else
/* Userland: %xmm registers are caller-saved per the ABI; no-ops. */
#define PROTECTED_CLTS
#define CLEAR_TS_OR_PUSH_XMM0_XMM1(tmpreg)
#define SET_TS_OR_POP_XMM0_XMM1(tmpreg)
#define CLEAR_TS_OR_PUSH_XMM0_TO_XMM6(tmpreg)
#define SET_TS_OR_POP_XMM0_TO_XMM6(tmpreg)
#endif /* _KERNEL */
/*
* _key_expansion_128(), * _key_expansion_192a(), _key_expansion_192b(),
* _key_expansion_256a(), _key_expansion_256b()
*
* Helper functions called by rijndael_key_setup_enc_intel().
* Also used indirectly by rijndael_key_setup_dec_intel().
*
* Input:
* %xmm0 User-provided cipher key
* %xmm1 Round constant
* Output:
* (%rcx) AES key
*/
.align 16
// Shared body: AES-128 round-key step, and the even (first-half) step
// of AES-256 expansion. Expects the caller to have zeroed %xmm4
// (see pxor in rijndael_key_setup_enc_intel); writes one 16-byte
// round key at (%rcx) and advances %rcx.
_key_expansion_128:
_key_expansion_256a:
pshufd $0b11111111, %xmm1, %xmm1
shufps $0b00010000, %xmm0, %xmm4
pxor %xmm4, %xmm0
shufps $0b10001100, %xmm0, %xmm4
pxor %xmm4, %xmm0
pxor %xmm1, %xmm0
movaps %xmm0, (%rcx)
add $0x10, %rcx
ret
SET_SIZE(_key_expansion_128)
SET_SIZE(_key_expansion_256a)
.align 16
// AES-192 expansion step "a": produces two 16-byte round keys
// (0x20 bytes) at (%rcx). Uses %xmm2 as the high 64 bits of the
// 192-bit key state; %xmm4 must be zero on entry.
_key_expansion_192a:
pshufd $0b01010101, %xmm1, %xmm1
shufps $0b00010000, %xmm0, %xmm4
pxor %xmm4, %xmm0
shufps $0b10001100, %xmm0, %xmm4
pxor %xmm4, %xmm0
pxor %xmm1, %xmm0
movaps %xmm2, %xmm5
movaps %xmm2, %xmm6
pslldq $4, %xmm5
pshufd $0b11111111, %xmm0, %xmm3
pxor %xmm3, %xmm2
pxor %xmm5, %xmm2
movaps %xmm0, %xmm1
shufps $0b01000100, %xmm0, %xmm6
movaps %xmm6, (%rcx)
shufps $0b01001110, %xmm2, %xmm1
movaps %xmm1, 0x10(%rcx)
add $0x20, %rcx
ret
SET_SIZE(_key_expansion_192a)
.align 16
// AES-192 expansion step "b": produces one 16-byte round key at
// (%rcx), updating the %xmm0/%xmm2 key state. %xmm4 must be zero.
_key_expansion_192b:
pshufd $0b01010101, %xmm1, %xmm1
shufps $0b00010000, %xmm0, %xmm4
pxor %xmm4, %xmm0
shufps $0b10001100, %xmm0, %xmm4
pxor %xmm4, %xmm0
pxor %xmm1, %xmm0
movaps %xmm2, %xmm5
pslldq $4, %xmm5
pshufd $0b11111111, %xmm0, %xmm3
pxor %xmm3, %xmm2
pxor %xmm5, %xmm2
movaps %xmm0, (%rcx)
add $0x10, %rcx
ret
SET_SIZE(_key_expansion_192b)
.align 16
// AES-256 expansion odd (second-half) step: updates %xmm2 and stores
// one 16-byte round key at (%rcx). %xmm4 must be zero on entry.
_key_expansion_256b:
pshufd $0b10101010, %xmm1, %xmm1
shufps $0b00010000, %xmm2, %xmm4
pxor %xmm4, %xmm2
shufps $0b10001100, %xmm2, %xmm4
pxor %xmm4, %xmm2
pxor %xmm1, %xmm2
movaps %xmm2, (%rcx)
add $0x10, %rcx
ret
SET_SIZE(_key_expansion_256b)
/*
* rijndael_key_setup_enc_intel()
* Expand the cipher key into the encryption key schedule.
*
* For kernel code, caller is responsible for ensuring kpreempt_disable()
* has been called. This is because %xmm registers are not saved/restored.
* Clear and set the CR0.TS bit on entry and exit, respectively, if TS is set
* on entry. Otherwise, if TS is not set, save and restore %xmm registers
* on the stack.
*
* OpenSolaris interface:
* int rijndael_key_setup_enc_intel(uint32_t rk[], const uint32_t cipherKey[],
* uint64_t keyBits);
* Return value is 0 on error, number of rounds on success.
*
* Original Intel OpenSSL interface:
* int intel_AES_set_encrypt_key(const unsigned char *userKey,
* const int bits, AES_KEY *key);
* Return value is non-zero on error, 0 on success.
*/
#ifdef OPENSSL_INTERFACE
#define rijndael_key_setup_enc_intel intel_AES_set_encrypt_key
#define rijndael_key_setup_dec_intel intel_AES_set_decrypt_key
#define USERCIPHERKEY rdi /* P1, 64 bits */
#define KEYSIZE32 esi /* P2, 32 bits */
#define KEYSIZE64 rsi /* P2, 64 bits */
#define AESKEY rdx /* P3, 64 bits */
#else /* OpenSolaris Interface */
#define AESKEY rdi /* P1, 64 bits */
#define USERCIPHERKEY rsi /* P2, 64 bits */
#define KEYSIZE32 edx /* P3, 32 bits */
#define KEYSIZE64 rdx /* P3, 64 bits */
#endif /* OPENSSL_INTERFACE */
#define ROUNDS32 KEYSIZE32 /* temp */
#define ROUNDS64 KEYSIZE64 /* temp */
#define ENDAESKEY USERCIPHERKEY /* temp */
ENTRY_NP(rijndael_key_setup_enc_intel)
// Local alias so rijndael_key_setup_dec_intel can call us directly
rijndael_key_setup_enc_intel_local:
CLEAR_TS_OR_PUSH_XMM0_TO_XMM6(%r10)
// NULL pointer sanity check
test %USERCIPHERKEY, %USERCIPHERKEY
jz .Lenc_key_invalid_param
test %AESKEY, %AESKEY
jz .Lenc_key_invalid_param
movups (%USERCIPHERKEY), %xmm0 // user key (first 16 bytes)
movaps %xmm0, (%AESKEY)
lea 0x10(%AESKEY), %rcx // key addr
pxor %xmm4, %xmm4 // xmm4 is assumed 0 in _key_expansion_x
// Dispatch on key size in bits (128, 192, or 256)
cmp $256, %KEYSIZE32
jnz .Lenc_key192
// AES 256: 14 rounds in encryption key schedule
#ifdef OPENSSL_INTERFACE
mov $14, %ROUNDS32
movl %ROUNDS32, 240(%AESKEY) // key.rounds = 14
#endif /* OPENSSL_INTERFACE */
movups 0x10(%USERCIPHERKEY), %xmm2 // other user key (2nd 16 bytes)
movaps %xmm2, (%rcx)
add $0x10, %rcx
// Alternate 256a/256b steps with round constants 0x1 .. 0x40
aeskeygenassist $0x1, %xmm2, %xmm1 // expand the key
call _key_expansion_256a
aeskeygenassist $0x1, %xmm0, %xmm1
call _key_expansion_256b
aeskeygenassist $0x2, %xmm2, %xmm1 // expand the key
call _key_expansion_256a
aeskeygenassist $0x2, %xmm0, %xmm1
call _key_expansion_256b
aeskeygenassist $0x4, %xmm2, %xmm1 // expand the key
call _key_expansion_256a
aeskeygenassist $0x4, %xmm0, %xmm1
call _key_expansion_256b
aeskeygenassist $0x8, %xmm2, %xmm1 // expand the key
call _key_expansion_256a
aeskeygenassist $0x8, %xmm0, %xmm1
call _key_expansion_256b
aeskeygenassist $0x10, %xmm2, %xmm1 // expand the key
call _key_expansion_256a
aeskeygenassist $0x10, %xmm0, %xmm1
call _key_expansion_256b
aeskeygenassist $0x20, %xmm2, %xmm1 // expand the key
call _key_expansion_256a
aeskeygenassist $0x20, %xmm0, %xmm1
call _key_expansion_256b
aeskeygenassist $0x40, %xmm2, %xmm1 // expand the key
call _key_expansion_256a
SET_TS_OR_POP_XMM0_TO_XMM6(%r10)
#ifdef OPENSSL_INTERFACE
xor %rax, %rax // return 0 (OK)
#else /* Open Solaris Interface */
mov $14, %rax // return # rounds = 14
#endif
ret
.align 4
.Lenc_key192:
cmp $192, %KEYSIZE32
jnz .Lenc_key128
// AES 192: 12 rounds in encryption key schedule
#ifdef OPENSSL_INTERFACE
mov $12, %ROUNDS32
movl %ROUNDS32, 240(%AESKEY) // key.rounds = 12
#endif /* OPENSSL_INTERFACE */
movq 0x10(%USERCIPHERKEY), %xmm2 // other user key
aeskeygenassist $0x1, %xmm2, %xmm1 // expand the key
call _key_expansion_192a
aeskeygenassist $0x2, %xmm2, %xmm1 // expand the key
call _key_expansion_192b
aeskeygenassist $0x4, %xmm2, %xmm1 // expand the key
call _key_expansion_192a
aeskeygenassist $0x8, %xmm2, %xmm1 // expand the key
call _key_expansion_192b
aeskeygenassist $0x10, %xmm2, %xmm1 // expand the key
call _key_expansion_192a
aeskeygenassist $0x20, %xmm2, %xmm1 // expand the key
call _key_expansion_192b
aeskeygenassist $0x40, %xmm2, %xmm1 // expand the key
call _key_expansion_192a
aeskeygenassist $0x80, %xmm2, %xmm1 // expand the key
call _key_expansion_192b
SET_TS_OR_POP_XMM0_TO_XMM6(%r10)
#ifdef OPENSSL_INTERFACE
xor %rax, %rax // return 0 (OK)
#else /* OpenSolaris Interface */
mov $12, %rax // return # rounds = 12
#endif
ret
.align 4
.Lenc_key128:
cmp $128, %KEYSIZE32
jnz .Lenc_key_invalid_key_bits
// AES 128: 10 rounds in encryption key schedule
#ifdef OPENSSL_INTERFACE
mov $10, %ROUNDS32
movl %ROUNDS32, 240(%AESKEY) // key.rounds = 10
#endif /* OPENSSL_INTERFACE */
// Round constants 0x1 .. 0x36 per FIPS-197 AES-128 key schedule
aeskeygenassist $0x1, %xmm0, %xmm1 // expand the key
call _key_expansion_128
aeskeygenassist $0x2, %xmm0, %xmm1 // expand the key
call _key_expansion_128
aeskeygenassist $0x4, %xmm0, %xmm1 // expand the key
call _key_expansion_128
aeskeygenassist $0x8, %xmm0, %xmm1 // expand the key
call _key_expansion_128
aeskeygenassist $0x10, %xmm0, %xmm1 // expand the key
call _key_expansion_128
aeskeygenassist $0x20, %xmm0, %xmm1 // expand the key
call _key_expansion_128
aeskeygenassist $0x40, %xmm0, %xmm1 // expand the key
call _key_expansion_128
aeskeygenassist $0x80, %xmm0, %xmm1 // expand the key
call _key_expansion_128
aeskeygenassist $0x1b, %xmm0, %xmm1 // expand the key
call _key_expansion_128
aeskeygenassist $0x36, %xmm0, %xmm1 // expand the key
call _key_expansion_128
SET_TS_OR_POP_XMM0_TO_XMM6(%r10)
#ifdef OPENSSL_INTERFACE
xor %rax, %rax // return 0 (OK)
#else /* OpenSolaris Interface */
mov $10, %rax // return # rounds = 10
#endif
ret
.Lenc_key_invalid_param:
#ifdef OPENSSL_INTERFACE
SET_TS_OR_POP_XMM0_TO_XMM6(%r10)
mov $-1, %rax // user key or AES key pointer is NULL
ret
#else
/* FALLTHROUGH */
#endif /* OPENSSL_INTERFACE */
.Lenc_key_invalid_key_bits:
SET_TS_OR_POP_XMM0_TO_XMM6(%r10)
#ifdef OPENSSL_INTERFACE
mov $-2, %rax // keysize is invalid
#else /* Open Solaris Interface */
xor %rax, %rax // a key pointer is NULL or invalid keysize
#endif /* OPENSSL_INTERFACE */
ret
SET_SIZE(rijndael_key_setup_enc_intel)
/*
* rijndael_key_setup_dec_intel()
* Expand the cipher key into the decryption key schedule.
*
* For kernel code, caller is responsible for ensuring kpreempt_disable()
* has been called. This is because %xmm registers are not saved/restored.
* Clear and set the CR0.TS bit on entry and exit, respectively, if TS is set
* on entry. Otherwise, if TS is not set, save and restore %xmm registers
* on the stack.
*
* OpenSolaris interface:
* int rijndael_key_setup_dec_intel(uint32_t rk[], const uint32_t cipherKey[],
* uint64_t keyBits);
* Return value is 0 on error, number of rounds on success.
* P1->P2, P2->P3, P3->P1
*
* Original Intel OpenSSL interface:
* int intel_AES_set_decrypt_key(const unsigned char *userKey,
* const int bits, AES_KEY *key);
* Return value is non-zero on error, 0 on success.
*/
ENTRY_NP(rijndael_key_setup_dec_intel)
// Generate round keys used for encryption
call rijndael_key_setup_enc_intel_local
test %rax, %rax
#ifdef OPENSSL_INTERFACE
jnz .Ldec_key_exit // Failed if returned non-0
#else /* OpenSolaris Interface */
jz .Ldec_key_exit // Failed if returned 0
#endif /* OPENSSL_INTERFACE */
CLEAR_TS_OR_PUSH_XMM0_XMM1(%r10)
/*
 * Convert round keys used for encryption
 * to a form usable for decryption
 */
#ifndef OPENSSL_INTERFACE /* OpenSolaris Interface */
mov %rax, %ROUNDS64 // set # rounds (10, 12, or 14)
// (already set for OpenSSL)
#endif
lea 0x10(%AESKEY), %rcx // key addr
shl $4, %ROUNDS32 // ROUNDS64 = byte offset of last round key
add %AESKEY, %ROUNDS64
mov %ROUNDS64, %ENDAESKEY
// First pass: reverse the order of the round keys in place,
// swapping from both ends toward the middle.
.align 4
.Ldec_key_reorder_loop:
movaps (%AESKEY), %xmm0
movaps (%ROUNDS64), %xmm1
movaps %xmm0, (%ROUNDS64)
movaps %xmm1, (%AESKEY)
lea 0x10(%AESKEY), %AESKEY
lea -0x10(%ROUNDS64), %ROUNDS64
cmp %AESKEY, %ROUNDS64
ja .Ldec_key_reorder_loop
// Second pass: apply InvMixColumns to every inner round key
// (first and last round keys are used as-is by aesdec/aesdeclast).
.align 4
.Ldec_key_inv_loop:
movaps (%rcx), %xmm0
// Convert an encryption round key to a form usable for decryption
// with the "AES Inverse Mix Columns" instruction
aesimc %xmm0, %xmm1
movaps %xmm1, (%rcx)
lea 0x10(%rcx), %rcx
cmp %ENDAESKEY, %rcx
jnz .Ldec_key_inv_loop
SET_TS_OR_POP_XMM0_XMM1(%r10)
.Ldec_key_exit:
// OpenSolaris: rax = # rounds (10, 12, or 14) or 0 for error
// OpenSSL: rax = 0 for OK, or non-zero for error
ret
SET_SIZE(rijndael_key_setup_dec_intel)
/*
* aes_encrypt_intel()
* Encrypt a single block (in and out can overlap).
*
* For kernel code, caller is responsible for ensuring kpreempt_disable()
* has been called. This is because %xmm registers are not saved/restored.
* Clear and set the CR0.TS bit on entry and exit, respectively, if TS is set
* on entry. Otherwise, if TS is not set, save and restore %xmm registers
* on the stack.
*
* Temporary register usage:
* %xmm0 State
* %xmm1 Key
*
* Original OpenSolaris Interface:
* void aes_encrypt_intel(const aes_ks_t *ks, int Nr,
* const uint32_t pt[4], uint32_t ct[4])
*
* Original Intel OpenSSL Interface:
* void intel_AES_encrypt(const unsigned char *in, unsigned char *out,
* const AES_KEY *key)
*/
#ifdef OPENSSL_INTERFACE
#define aes_encrypt_intel intel_AES_encrypt
#define aes_decrypt_intel intel_AES_decrypt
#define INP rdi /* P1, 64 bits */
#define OUTP rsi /* P2, 64 bits */
#define KEYP rdx /* P3, 64 bits */
/* No NROUNDS parameter--offset 240 from KEYP saved in %ecx: */
#define NROUNDS32 ecx /* temporary, 32 bits */
#define NROUNDS cl /* temporary, 8 bits */
#else /* OpenSolaris Interface */
#define KEYP rdi /* P1, 64 bits */
#define NROUNDS esi /* P2, 32 bits */
#define INP rdx /* P3, 64 bits */
#define OUTP rcx /* P4, 64 bits */
#endif /* OPENSSL_INTERFACE */
#define STATE xmm0 /* temporary, 128 bits */
#define KEY xmm1 /* temporary, 128 bits */
ENTRY_NP(aes_encrypt_intel)
CLEAR_TS_OR_PUSH_XMM0_XMM1(%r10)
movups (%INP), %STATE // input
movaps (%KEYP), %KEY // key
#ifdef OPENSSL_INTERFACE
mov 240(%KEYP), %NROUNDS32 // round count
#else /* OpenSolaris Interface */
/* Round count is already present as P2 in %rsi/%esi */
#endif /* OPENSSL_INTERFACE */
pxor %KEY, %STATE // round 0
// Position KEYP so the common tail below can use fixed offsets
// regardless of the number of rounds (10, 12, or 14).
lea 0x30(%KEYP), %KEYP
cmp $12, %NROUNDS
jb .Lenc128
lea 0x20(%KEYP), %KEYP
je .Lenc192
// AES 256
lea 0x20(%KEYP), %KEYP
movaps -0x60(%KEYP), %KEY
aesenc %KEY, %STATE
movaps -0x50(%KEYP), %KEY
aesenc %KEY, %STATE
.align 4
.Lenc192:
// AES 192 and 256
movaps -0x40(%KEYP), %KEY
aesenc %KEY, %STATE
movaps -0x30(%KEYP), %KEY
aesenc %KEY, %STATE
.align 4
.Lenc128:
// AES 128, 192, and 256
movaps -0x20(%KEYP), %KEY
aesenc %KEY, %STATE
movaps -0x10(%KEYP), %KEY
aesenc %KEY, %STATE
movaps (%KEYP), %KEY
aesenc %KEY, %STATE
movaps 0x10(%KEYP), %KEY
aesenc %KEY, %STATE
movaps 0x20(%KEYP), %KEY
aesenc %KEY, %STATE
movaps 0x30(%KEYP), %KEY
aesenc %KEY, %STATE
movaps 0x40(%KEYP), %KEY
aesenc %KEY, %STATE
movaps 0x50(%KEYP), %KEY
aesenc %KEY, %STATE
movaps 0x60(%KEYP), %KEY
aesenc %KEY, %STATE
movaps 0x70(%KEYP), %KEY
aesenclast %KEY, %STATE // last round
movups %STATE, (%OUTP) // output
SET_TS_OR_POP_XMM0_XMM1(%r10)
ret
SET_SIZE(aes_encrypt_intel)
/*
* aes_decrypt_intel()
* Decrypt a single block (in and out can overlap).
*
* For kernel code, caller is responsible for ensuring kpreempt_disable()
* has been called. This is because %xmm registers are not saved/restored.
* Clear and set the CR0.TS bit on entry and exit, respectively, if TS is set
* on entry. Otherwise, if TS is not set, save and restore %xmm registers
* on the stack.
*
* Temporary register usage:
* %xmm0 State
* %xmm1 Key
*
* Original OpenSolaris Interface:
* void aes_decrypt_intel(const aes_ks_t *ks, int Nr,
* const uint32_t pt[4], uint32_t ct[4]);
*
* Original Intel OpenSSL Interface:
* void intel_AES_decrypt(const unsigned char *in, unsigned char *out,
* const AES_KEY *key);
*/
ENTRY_NP(aes_decrypt_intel)
CLEAR_TS_OR_PUSH_XMM0_XMM1(%r10)
movups (%INP), %STATE // input
movaps (%KEYP), %KEY // key
#ifdef OPENSSL_INTERFACE
mov 240(%KEYP), %NROUNDS32 // round count
#else /* OpenSolaris Interface */
/* Round count is already present as P2 in %rsi/%esi */
#endif /* OPENSSL_INTERFACE */
pxor %KEY, %STATE // round 0
// Position KEYP so the common tail below can use fixed offsets
// regardless of the number of rounds (10, 12, or 14).
lea 0x30(%KEYP), %KEYP
cmp $12, %NROUNDS
jb .Ldec128
lea 0x20(%KEYP), %KEYP
je .Ldec192
// AES 256
lea 0x20(%KEYP), %KEYP
movaps -0x60(%KEYP), %KEY
aesdec %KEY, %STATE
movaps -0x50(%KEYP), %KEY
aesdec %KEY, %STATE
.align 4
.Ldec192:
// AES 192 and 256
movaps -0x40(%KEYP), %KEY
aesdec %KEY, %STATE
movaps -0x30(%KEYP), %KEY
aesdec %KEY, %STATE
.align 4
.Ldec128:
// AES 128, 192, and 256
movaps -0x20(%KEYP), %KEY
aesdec %KEY, %STATE
movaps -0x10(%KEYP), %KEY
aesdec %KEY, %STATE
movaps (%KEYP), %KEY
aesdec %KEY, %STATE
movaps 0x10(%KEYP), %KEY
aesdec %KEY, %STATE
movaps 0x20(%KEYP), %KEY
aesdec %KEY, %STATE
movaps 0x30(%KEYP), %KEY
aesdec %KEY, %STATE
movaps 0x40(%KEYP), %KEY
aesdec %KEY, %STATE
movaps 0x50(%KEYP), %KEY
aesdec %KEY, %STATE
movaps 0x60(%KEYP), %KEY
aesdec %KEY, %STATE
movaps 0x70(%KEYP), %KEY
aesdeclast %KEY, %STATE // last round
movups %STATE, (%OUTP) // output
SET_TS_OR_POP_XMM0_XMM1(%r10)
ret
SET_SIZE(aes_decrypt_intel)
#endif /* lint || __lint */

View File

@ -0,0 +1,580 @@
/*
* ---------------------------------------------------------------------------
* Copyright (c) 1998-2007, Brian Gladman, Worcester, UK. All rights reserved.
*
* LICENSE TERMS
*
* The free distribution and use of this software is allowed (with or without
* changes) provided that:
*
* 1. source code distributions include the above copyright notice, this
* list of conditions and the following disclaimer;
*
* 2. binary distributions include the above copyright notice, this list
* of conditions and the following disclaimer in their documentation;
*
* 3. the name of the copyright holder is not used to endorse products
* built using this software without specific written permission.
*
* DISCLAIMER
*
* This software is provided 'as is' with no explicit or implied warranties
* in respect of its properties, including, but not limited to, correctness
* and/or fitness for purpose.
* ---------------------------------------------------------------------------
* Issue Date: 20/12/2007
*/
#include <aes/aes_impl.h>
#include "aesopt.h"
#include "aestab.h"
#include "aestab2.h"
/*
* Initialise the key schedule from the user supplied key. The key
* length can be specified in bytes, with legal values of 16, 24
* and 32, or in bits, with legal values of 128, 192 and 256. These
* values correspond with Nk values of 4, 6 and 8 respectively.
*
* The following macros implement a single cycle in the key
* schedule generation process. The number of cycles needed
* for each cx->n_col and nk value is:
*
* nk = 4 5 6 7 8
* ------------------------------
* cx->n_col = 4 10 9 8 7 7
* cx->n_col = 5 14 11 10 9 9
* cx->n_col = 6 19 15 12 11 11
* cx->n_col = 7 21 19 16 13 14
* cx->n_col = 8 29 23 19 17 14
*/
/*
* OpenSolaris changes
* 1. Added header files aes_impl.h and aestab2.h
* 2. Changed uint_8t and uint_32t to uint8_t and uint32_t
* 3. Remove code under ifdef USE_VIA_ACE_IF_PRESENT (always undefined)
* 4. Removed always-defined ifdefs FUNCS_IN_C, ENC_KEYING_IN_C,
* AES_128, AES_192, AES_256, AES_VAR defines
* 5. Changed aes_encrypt_key* aes_decrypt_key* functions to "static void"
* 6. Changed N_COLS to MAX_AES_NB
* 7. Replaced functions aes_encrypt_key and aes_decrypt_key with
* OpenSolaris-compatible functions rijndael_key_setup_enc_amd64 and
* rijndael_key_setup_dec_amd64
* 8. cstyled code and removed lint warnings
*/
#if defined(REDUCE_CODE_SIZE)
#define ls_box ls_sub
uint32_t ls_sub(const uint32_t t, const uint32_t n);
#define inv_mcol im_sub
uint32_t im_sub(const uint32_t x);
#ifdef ENC_KS_UNROLL
#undef ENC_KS_UNROLL
#endif
#ifdef DEC_KS_UNROLL
#undef DEC_KS_UNROLL
#endif
#endif /* REDUCE_CODE_SIZE */
#define ke4(k, i) \
{ k[4 * (i) + 4] = ss[0] ^= ls_box(ss[3], 3) ^ t_use(r, c)[i]; \
k[4 * (i) + 5] = ss[1] ^= ss[0]; \
k[4 * (i) + 6] = ss[2] ^= ss[1]; \
k[4 * (i) + 7] = ss[3] ^= ss[2]; \
}
/*
 * Expand a 128-bit user key into the AES-128 (10-round) encryption
 * key schedule.  rk[0..3] receive the key itself; the ke4() macro
 * above fills the remaining 40 words.  Note ke4() references the
 * local array literally by the name `ss`, so that name is fixed.
 */
static void
aes_encrypt_key128(const unsigned char *key, uint32_t rk[])
{
uint32_t ss[4]; /* rolling key-schedule state consumed by ke4() */
rk[0] = ss[0] = word_in(key, 0);
rk[1] = ss[1] = word_in(key, 1);
rk[2] = ss[2] = word_in(key, 2);
rk[3] = ss[3] = word_in(key, 3);
#ifdef ENC_KS_UNROLL
ke4(rk, 0); ke4(rk, 1);
ke4(rk, 2); ke4(rk, 3);
ke4(rk, 4); ke4(rk, 5);
ke4(rk, 6); ke4(rk, 7);
ke4(rk, 8);
#else
{
uint32_t i;
for (i = 0; i < 9; ++i)
ke4(rk, i);
}
#endif /* ENC_KS_UNROLL */
ke4(rk, 9);
}
#define kef6(k, i) \
{ k[6 * (i) + 6] = ss[0] ^= ls_box(ss[5], 3) ^ t_use(r, c)[i]; \
k[6 * (i) + 7] = ss[1] ^= ss[0]; \
k[6 * (i) + 8] = ss[2] ^= ss[1]; \
k[6 * (i) + 9] = ss[3] ^= ss[2]; \
}
#define ke6(k, i) \
{ kef6(k, i); \
k[6 * (i) + 10] = ss[4] ^= ss[3]; \
k[6 * (i) + 11] = ss[5] ^= ss[4]; \
}
/*
 * Expand a 192-bit user key into the AES-192 (12-round) encryption
 * key schedule.  rk[0..5] receive the key itself; ke6()/kef6() fill
 * the rest.  The final cycle uses kef6() because only 4 of the 6
 * generated words are needed to complete the schedule.
 */
static void
aes_encrypt_key192(const unsigned char *key, uint32_t rk[])
{
uint32_t ss[6]; /* rolling key-schedule state consumed by ke6() */
rk[0] = ss[0] = word_in(key, 0);
rk[1] = ss[1] = word_in(key, 1);
rk[2] = ss[2] = word_in(key, 2);
rk[3] = ss[3] = word_in(key, 3);
rk[4] = ss[4] = word_in(key, 4);
rk[5] = ss[5] = word_in(key, 5);
#ifdef ENC_KS_UNROLL
ke6(rk, 0); ke6(rk, 1);
ke6(rk, 2); ke6(rk, 3);
ke6(rk, 4); ke6(rk, 5);
ke6(rk, 6);
#else
{
uint32_t i;
for (i = 0; i < 7; ++i)
ke6(rk, i);
}
#endif /* ENC_KS_UNROLL */
kef6(rk, 7);
}
#define kef8(k, i) \
{ k[8 * (i) + 8] = ss[0] ^= ls_box(ss[7], 3) ^ t_use(r, c)[i]; \
k[8 * (i) + 9] = ss[1] ^= ss[0]; \
k[8 * (i) + 10] = ss[2] ^= ss[1]; \
k[8 * (i) + 11] = ss[3] ^= ss[2]; \
}
#define ke8(k, i) \
{ kef8(k, i); \
k[8 * (i) + 12] = ss[4] ^= ls_box(ss[3], 0); \
k[8 * (i) + 13] = ss[5] ^= ss[4]; \
k[8 * (i) + 14] = ss[6] ^= ss[5]; \
k[8 * (i) + 15] = ss[7] ^= ss[6]; \
}
/*
 * Expand a 256-bit user key into the AES-256 (14-round) encryption
 * key schedule.  rk[0..7] receive the key itself; ke8()/kef8() fill
 * the rest.  The final cycle uses kef8() because only 4 of the 8
 * generated words are needed to complete the schedule.
 */
static void
aes_encrypt_key256(const unsigned char *key, uint32_t rk[])
{
uint32_t ss[8]; /* rolling key-schedule state consumed by ke8() */
rk[0] = ss[0] = word_in(key, 0);
rk[1] = ss[1] = word_in(key, 1);
rk[2] = ss[2] = word_in(key, 2);
rk[3] = ss[3] = word_in(key, 3);
rk[4] = ss[4] = word_in(key, 4);
rk[5] = ss[5] = word_in(key, 5);
rk[6] = ss[6] = word_in(key, 6);
rk[7] = ss[7] = word_in(key, 7);
#ifdef ENC_KS_UNROLL
ke8(rk, 0); ke8(rk, 1);
ke8(rk, 2); ke8(rk, 3);
ke8(rk, 4); ke8(rk, 5);
#else
{
uint32_t i;
for (i = 0; i < 6; ++i)
ke8(rk, i);
}
#endif /* ENC_KS_UNROLL */
kef8(rk, 6);
}
/*
* Expand the cipher key into the encryption key schedule.
*
* Return the number of rounds for the given cipher key size.
* The size of the key schedule depends on the number of rounds
* (which can be computed from the size of the key), i.e. 4 * (Nr + 1).
*
* Parameters:
* rk AES key schedule 32-bit array to be initialized
* cipherKey User key
* keyBits AES key size (128, 192, or 256 bits)
*/
/*
 * Expand the cipher key into the encryption key schedule.
 *
 * Dispatches to the static per-size expansion helper and returns the
 * AES round count for the given key size, or 0 if keyBits is not one
 * of 128, 192, or 256.
 *
 * Parameters:
 *  rk        AES key schedule 32-bit array to be initialized
 *  cipherKey User key
 *  keyBits   AES key size (128, 192, or 256 bits)
 */
int
rijndael_key_setup_enc_amd64(uint32_t rk[], const uint32_t cipherKey[],
int keyBits)
{
const unsigned char *ukey = (const unsigned char *)cipherKey;

if (keyBits == 128) {
aes_encrypt_key128(ukey, rk);
return (10);
}
if (keyBits == 192) {
aes_encrypt_key192(ukey, rk);
return (12);
}
if (keyBits == 256) {
aes_encrypt_key256(ukey, rk);
return (14);
}
/* Unsupported key size; callers treat 0 as an error. */
return (0);
}
/* this is used to store the decryption round keys */
/* in forward or reverse order */
/*
 * v(n, i): maps word index i of an n-word key schedule onto its storage
 * slot.  With AES_REV_DKS the decryption schedule is stored in reverse
 * round order (keeping the four words of each round together); without
 * it the index is used unchanged.
 */
#ifdef AES_REV_DKS
#define v(n, i) ((n) - (i) + 2 * ((i) & 3))
#else
#define v(n, i) (i)
#endif
/*
 * ff(x): prepares a round-key word for the table-driven "equivalent
 * inverse cipher" by applying the inverse MixColumns transform; it is
 * the identity when decryption rounds use no tables.
 */
#if DEC_ROUND == NO_TABLES
#define ff(x) (x)
#else
#define ff(x) inv_mcol(x)
#if defined(dec_imvars)
#define d_vars dec_imvars
#endif
#endif /* DEC_ROUND == NO_TABLES */
/* k4e(k, i): one standard (encryption-form) 128-bit key expansion step */
#define k4e(k, i) \
{ k[v(40, (4 * (i)) + 4)] = ss[0] ^= ls_box(ss[3], 3) ^ t_use(r, c)[i]; \
	k[v(40, (4 * (i)) + 5)] = ss[1] ^= ss[0]; \
	k[v(40, (4 * (i)) + 6)] = ss[2] ^= ss[1]; \
	k[v(40, (4 * (i)) + 7)] = ss[3] ^= ss[2]; \
}
/* two equivalent formulations of the unrolled 128-bit decryption steps */
#if 1
/* kdf4(k, i): first decryption step; ff() is applied to each new word */
#define kdf4(k, i) \
{ ss[0] = ss[0] ^ ss[2] ^ ss[1] ^ ss[3]; \
	ss[1] = ss[1] ^ ss[3]; \
	ss[2] = ss[2] ^ ss[3]; \
	ss[4] = ls_box(ss[(i + 3) % 4], 3) ^ t_use(r, c)[i]; \
	ss[i % 4] ^= ss[4]; \
	ss[4] ^= k[v(40, (4 * (i)))]; k[v(40, (4 * (i)) + 4)] = ff(ss[4]); \
	ss[4] ^= k[v(40, (4 * (i)) + 1)]; k[v(40, (4 * (i)) + 5)] = ff(ss[4]); \
	ss[4] ^= k[v(40, (4 * (i)) + 2)]; k[v(40, (4 * (i)) + 6)] = ff(ss[4]); \
	ss[4] ^= k[v(40, (4 * (i)) + 3)]; k[v(40, (4 * (i)) + 7)] = ff(ss[4]); \
}
/* kd4(k, i): interior decryption step (ff applied incrementally) */
#define kd4(k, i) \
{ ss[4] = ls_box(ss[(i + 3) % 4], 3) ^ t_use(r, c)[i]; \
	ss[i % 4] ^= ss[4]; ss[4] = ff(ss[4]); \
	k[v(40, (4 * (i)) + 4)] = ss[4] ^= k[v(40, (4 * (i)))]; \
	k[v(40, (4 * (i)) + 5)] = ss[4] ^= k[v(40, (4 * (i)) + 1)]; \
	k[v(40, (4 * (i)) + 6)] = ss[4] ^= k[v(40, (4 * (i)) + 2)]; \
	k[v(40, (4 * (i)) + 7)] = ss[4] ^= k[v(40, (4 * (i)) + 3)]; \
}
/* kdl4(k, i): last decryption step; final round keys get no ff() */
#define kdl4(k, i) \
{ ss[4] = ls_box(ss[(i + 3) % 4], 3) ^ t_use(r, c)[i]; \
	ss[i % 4] ^= ss[4]; \
	k[v(40, (4 * (i)) + 4)] = (ss[0] ^= ss[1]) ^ ss[2] ^ ss[3]; \
	k[v(40, (4 * (i)) + 5)] = ss[1] ^ ss[3]; \
	k[v(40, (4 * (i)) + 6)] = ss[0]; \
	k[v(40, (4 * (i)) + 7)] = ss[1]; \
}
#else
/* alternative, more direct forms of the same three steps */
#define kdf4(k, i) \
{ ss[0] ^= ls_box(ss[3], 3) ^ t_use(r, c)[i]; \
	k[v(40, (4 * (i)) + 4)] = ff(ss[0]); \
	ss[1] ^= ss[0]; k[v(40, (4 * (i)) + 5)] = ff(ss[1]); \
	ss[2] ^= ss[1]; k[v(40, (4 * (i)) + 6)] = ff(ss[2]); \
	ss[3] ^= ss[2]; k[v(40, (4 * (i)) + 7)] = ff(ss[3]); \
}
#define kd4(k, i) \
{ ss[4] = ls_box(ss[3], 3) ^ t_use(r, c)[i]; \
	ss[0] ^= ss[4]; \
	ss[4] = ff(ss[4]); \
	k[v(40, (4 * (i)) + 4)] = ss[4] ^= k[v(40, (4 * (i)))]; \
	ss[1] ^= ss[0]; \
	k[v(40, (4 * (i)) + 5)] = ss[4] ^= k[v(40, (4 * (i)) + 1)]; \
	ss[2] ^= ss[1]; \
	k[v(40, (4 * (i)) + 6)] = ss[4] ^= k[v(40, (4 * (i)) + 2)]; \
	ss[3] ^= ss[2]; \
	k[v(40, (4 * (i)) + 7)] = ss[4] ^= k[v(40, (4 * (i)) + 3)]; \
}
#define kdl4(k, i) \
{ ss[0] ^= ls_box(ss[3], 3) ^ t_use(r, c)[i]; \
	k[v(40, (4 * (i)) + 4)] = ss[0]; \
	ss[1] ^= ss[0]; k[v(40, (4 * (i)) + 5)] = ss[1]; \
	ss[2] ^= ss[1]; k[v(40, (4 * (i)) + 6)] = ss[2]; \
	ss[3] ^= ss[2]; k[v(40, (4 * (i)) + 7)] = ss[3]; \
}
#endif
/*
 * Expand a 16-byte (128-bit) cipher key into the decryption key
 * schedule in rk[] (4 * (10 + 1) = 44 32-bit words).  With
 * DEC_KS_UNROLL the ten expansion rounds are fully unrolled and the
 * inverse MixColumns transform (ff) is folded into the generated
 * words; otherwise the encryption schedule is built in a loop with
 * k4e() and the interior round keys are converted with inv_mcol()
 * afterwards.
 */
static void
aes_decrypt_key128(const unsigned char *key, uint32_t rk[])
{
	uint32_t ss[5];
#if defined(d_vars)
	d_vars;
#endif
	/* the first four schedule words are the user key itself */
	rk[v(40, (0))] = ss[0] = word_in(key, 0);
	rk[v(40, (1))] = ss[1] = word_in(key, 1);
	rk[v(40, (2))] = ss[2] = word_in(key, 2);
	rk[v(40, (3))] = ss[3] = word_in(key, 3);
#ifdef DEC_KS_UNROLL
	kdf4(rk, 0); kd4(rk, 1);
	kd4(rk, 2); kd4(rk, 3);
	kd4(rk, 4); kd4(rk, 5);
	kd4(rk, 6); kd4(rk, 7);
	kd4(rk, 8); kdl4(rk, 9);
#else
	{
		uint32_t i;
		for (i = 0; i < 10; ++i)
			k4e(rk, i);
#if !(DEC_ROUND == NO_TABLES)
		/* skip the first and last round keys: no inverse mix there */
		for (i = MAX_AES_NB; i < 10 * MAX_AES_NB; ++i)
			rk[i] = inv_mcol(rk[i]);
#endif
	}
#endif /* DEC_KS_UNROLL */
}
/* k6ef(k, i): final (partial) 192-bit expansion step — four words only */
#define k6ef(k, i) \
{ k[v(48, (6 * (i)) + 6)] = ss[0] ^= ls_box(ss[5], 3) ^ t_use(r, c)[i]; \
	k[v(48, (6 * (i)) + 7)] = ss[1] ^= ss[0]; \
	k[v(48, (6 * (i)) + 8)] = ss[2] ^= ss[1]; \
	k[v(48, (6 * (i)) + 9)] = ss[3] ^= ss[2]; \
}
/* k6e(k, i): full 192-bit expansion step — six words per iteration */
#define k6e(k, i) \
{ k6ef(k, i); \
	k[v(48, (6 * (i)) + 10)] = ss[4] ^= ss[3]; \
	k[v(48, (6 * (i)) + 11)] = ss[5] ^= ss[4]; \
}
/* kdf6(k, i): first unrolled 192-bit decryption step (ff on new words) */
#define kdf6(k, i) \
{ ss[0] ^= ls_box(ss[5], 3) ^ t_use(r, c)[i]; \
	k[v(48, (6 * (i)) + 6)] = ff(ss[0]); \
	ss[1] ^= ss[0]; k[v(48, (6 * (i)) + 7)] = ff(ss[1]); \
	ss[2] ^= ss[1]; k[v(48, (6 * (i)) + 8)] = ff(ss[2]); \
	ss[3] ^= ss[2]; k[v(48, (6 * (i)) + 9)] = ff(ss[3]); \
	ss[4] ^= ss[3]; k[v(48, (6 * (i)) + 10)] = ff(ss[4]); \
	ss[5] ^= ss[4]; k[v(48, (6 * (i)) + 11)] = ff(ss[5]); \
}
/* kd6(k, i): interior unrolled 192-bit decryption step */
#define kd6(k, i) \
{ ss[6] = ls_box(ss[5], 3) ^ t_use(r, c)[i]; \
	ss[0] ^= ss[6]; ss[6] = ff(ss[6]); \
	k[v(48, (6 * (i)) + 6)] = ss[6] ^= k[v(48, (6 * (i)))]; \
	ss[1] ^= ss[0]; \
	k[v(48, (6 * (i)) + 7)] = ss[6] ^= k[v(48, (6 * (i)) + 1)]; \
	ss[2] ^= ss[1]; \
	k[v(48, (6 * (i)) + 8)] = ss[6] ^= k[v(48, (6 * (i)) + 2)]; \
	ss[3] ^= ss[2]; \
	k[v(48, (6 * (i)) + 9)] = ss[6] ^= k[v(48, (6 * (i)) + 3)]; \
	ss[4] ^= ss[3]; \
	k[v(48, (6 * (i)) + 10)] = ss[6] ^= k[v(48, (6 * (i)) + 4)]; \
	ss[5] ^= ss[4]; \
	k[v(48, (6 * (i)) + 11)] = ss[6] ^= k[v(48, (6 * (i)) + 5)]; \
}
/* kdl6(k, i): last unrolled 192-bit decryption step (no ff) */
#define kdl6(k, i) \
{ ss[0] ^= ls_box(ss[5], 3) ^ t_use(r, c)[i]; \
	k[v(48, (6 * (i)) + 6)] = ss[0]; \
	ss[1] ^= ss[0]; k[v(48, (6 * (i)) + 7)] = ss[1]; \
	ss[2] ^= ss[1]; k[v(48, (6 * (i)) + 8)] = ss[2]; \
	ss[3] ^= ss[2]; k[v(48, (6 * (i)) + 9)] = ss[3]; \
}
/*
 * Expand a 24-byte (192-bit) cipher key into the decryption key
 * schedule in rk[] (4 * (12 + 1) = 52 32-bit words).  The unrolled
 * path folds the inverse MixColumns transform (ff) into each generated
 * word; the loop path builds the encryption schedule with k6e()/k6ef()
 * and then converts the interior round keys with inv_mcol().
 */
static void
aes_decrypt_key192(const unsigned char *key, uint32_t rk[])
{
	uint32_t ss[7];
#if defined(d_vars)
	d_vars;
#endif
	/* the first four schedule words are the leading key words */
	rk[v(48, (0))] = ss[0] = word_in(key, 0);
	rk[v(48, (1))] = ss[1] = word_in(key, 1);
	rk[v(48, (2))] = ss[2] = word_in(key, 2);
	rk[v(48, (3))] = ss[3] = word_in(key, 3);
#ifdef DEC_KS_UNROLL
	/* words 4 and 5 are interior round keys so they need ff() */
	ss[4] = word_in(key, 4);
	rk[v(48, (4))] = ff(ss[4]);
	ss[5] = word_in(key, 5);
	rk[v(48, (5))] = ff(ss[5]);
	kdf6(rk, 0); kd6(rk, 1);
	kd6(rk, 2); kd6(rk, 3);
	kd6(rk, 4); kd6(rk, 5);
	kd6(rk, 6); kdl6(rk, 7);
#else
	rk[v(48, (4))] = ss[4] = word_in(key, 4);
	rk[v(48, (5))] = ss[5] = word_in(key, 5);
	{
		uint32_t i;
		for (i = 0; i < 7; ++i)
			k6e(rk, i);
		k6ef(rk, 7);
#if !(DEC_ROUND == NO_TABLES)
		/* skip the first and last round keys: no inverse mix there */
		for (i = MAX_AES_NB; i < 12 * MAX_AES_NB; ++i)
			rk[i] = inv_mcol(rk[i]);
#endif
	}
#endif
}
/* k8ef(k, i): final (partial) 256-bit expansion step — four words only */
#define k8ef(k, i) \
{ k[v(56, (8 * (i)) + 8)] = ss[0] ^= ls_box(ss[7], 3) ^ t_use(r, c)[i]; \
	k[v(56, (8 * (i)) + 9)] = ss[1] ^= ss[0]; \
	k[v(56, (8 * (i)) + 10)] = ss[2] ^= ss[1]; \
	k[v(56, (8 * (i)) + 11)] = ss[3] ^= ss[2]; \
}
/* k8e(k, i): full 256-bit expansion step — eight words per iteration */
#define k8e(k, i) \
{ k8ef(k, i); \
	k[v(56, (8 * (i)) + 12)] = ss[4] ^= ls_box(ss[3], 0); \
	k[v(56, (8 * (i)) + 13)] = ss[5] ^= ss[4]; \
	k[v(56, (8 * (i)) + 14)] = ss[6] ^= ss[5]; \
	k[v(56, (8 * (i)) + 15)] = ss[7] ^= ss[6]; \
}
/* kdf8(k, i): first unrolled 256-bit decryption step (ff on new words) */
#define kdf8(k, i) \
{ ss[0] ^= ls_box(ss[7], 3) ^ t_use(r, c)[i]; \
	k[v(56, (8 * (i)) + 8)] = ff(ss[0]); \
	ss[1] ^= ss[0]; k[v(56, (8 * (i)) + 9)] = ff(ss[1]); \
	ss[2] ^= ss[1]; k[v(56, (8 * (i)) + 10)] = ff(ss[2]); \
	ss[3] ^= ss[2]; k[v(56, (8 * (i)) + 11)] = ff(ss[3]); \
	ss[4] ^= ls_box(ss[3], 0); k[v(56, (8 * (i)) + 12)] = ff(ss[4]); \
	ss[5] ^= ss[4]; k[v(56, (8 * (i)) + 13)] = ff(ss[5]); \
	ss[6] ^= ss[5]; k[v(56, (8 * (i)) + 14)] = ff(ss[6]); \
	ss[7] ^= ss[6]; k[v(56, (8 * (i)) + 15)] = ff(ss[7]); \
}
/* kd8(k, i): interior unrolled 256-bit decryption step */
#define kd8(k, i) \
{ ss[8] = ls_box(ss[7], 3) ^ t_use(r, c)[i]; \
	ss[0] ^= ss[8]; \
	ss[8] = ff(ss[8]); \
	k[v(56, (8 * (i)) + 8)] = ss[8] ^= k[v(56, (8 * (i)))]; \
	ss[1] ^= ss[0]; \
	k[v(56, (8 * (i)) + 9)] = ss[8] ^= k[v(56, (8 * (i)) + 1)]; \
	ss[2] ^= ss[1]; \
	k[v(56, (8 * (i)) + 10)] = ss[8] ^= k[v(56, (8 * (i)) + 2)]; \
	ss[3] ^= ss[2]; \
	k[v(56, (8 * (i)) + 11)] = ss[8] ^= k[v(56, (8 * (i)) + 3)]; \
	ss[8] = ls_box(ss[3], 0); \
	ss[4] ^= ss[8]; \
	ss[8] = ff(ss[8]); \
	k[v(56, (8 * (i)) + 12)] = ss[8] ^= k[v(56, (8 * (i)) + 4)]; \
	ss[5] ^= ss[4]; \
	k[v(56, (8 * (i)) + 13)] = ss[8] ^= k[v(56, (8 * (i)) + 5)]; \
	ss[6] ^= ss[5]; \
	k[v(56, (8 * (i)) + 14)] = ss[8] ^= k[v(56, (8 * (i)) + 6)]; \
	ss[7] ^= ss[6]; \
	k[v(56, (8 * (i)) + 15)] = ss[8] ^= k[v(56, (8 * (i)) + 7)]; \
}
/* kdl8(k, i): last unrolled 256-bit decryption step (no ff) */
#define kdl8(k, i) \
{ ss[0] ^= ls_box(ss[7], 3) ^ t_use(r, c)[i]; \
	k[v(56, (8 * (i)) + 8)] = ss[0]; \
	ss[1] ^= ss[0]; k[v(56, (8 * (i)) + 9)] = ss[1]; \
	ss[2] ^= ss[1]; k[v(56, (8 * (i)) + 10)] = ss[2]; \
	ss[3] ^= ss[2]; k[v(56, (8 * (i)) + 11)] = ss[3]; \
}
/*
 * Expand a 32-byte (256-bit) cipher key into the decryption key
 * schedule in rk[] (4 * (14 + 1) = 60 32-bit words).  The unrolled
 * path folds the inverse MixColumns transform (ff) into each generated
 * word; the loop path builds the encryption schedule with k8e()/k8ef()
 * and then converts the interior round keys with inv_mcol().
 */
static void
aes_decrypt_key256(const unsigned char *key, uint32_t rk[])
{
	uint32_t ss[9];
#if defined(d_vars)
	d_vars;
#endif
	/* the first four schedule words are the leading key words */
	rk[v(56, (0))] = ss[0] = word_in(key, 0);
	rk[v(56, (1))] = ss[1] = word_in(key, 1);
	rk[v(56, (2))] = ss[2] = word_in(key, 2);
	rk[v(56, (3))] = ss[3] = word_in(key, 3);
#ifdef DEC_KS_UNROLL
	/* words 4..7 are interior round keys so they need ff() */
	ss[4] = word_in(key, 4);
	rk[v(56, (4))] = ff(ss[4]);
	ss[5] = word_in(key, 5);
	rk[v(56, (5))] = ff(ss[5]);
	ss[6] = word_in(key, 6);
	rk[v(56, (6))] = ff(ss[6]);
	ss[7] = word_in(key, 7);
	rk[v(56, (7))] = ff(ss[7]);
	kdf8(rk, 0); kd8(rk, 1);
	kd8(rk, 2); kd8(rk, 3);
	kd8(rk, 4); kd8(rk, 5);
	kdl8(rk, 6);
#else
	rk[v(56, (4))] = ss[4] = word_in(key, 4);
	rk[v(56, (5))] = ss[5] = word_in(key, 5);
	rk[v(56, (6))] = ss[6] = word_in(key, 6);
	rk[v(56, (7))] = ss[7] = word_in(key, 7);
	{
		uint32_t i;
		for (i = 0; i < 6; ++i)
			k8e(rk, i);
		k8ef(rk, 6);
#if !(DEC_ROUND == NO_TABLES)
		/* skip the first and last round keys: no inverse mix there */
		for (i = MAX_AES_NB; i < 14 * MAX_AES_NB; ++i)
			rk[i] = inv_mcol(rk[i]);
#endif
	}
#endif /* DEC_KS_UNROLL */
}
/*
* Expand the cipher key into the decryption key schedule.
*
* Return the number of rounds for the given cipher key size.
* The size of the key schedule depends on the number of rounds
* (which can be computed from the size of the key), i.e. 4 * (Nr + 1).
*
* Parameters:
* rk AES key schedule 32-bit array to be initialized
* cipherKey User key
* keyBits AES key size (128, 192, or 256 bits)
*/
int
rijndael_key_setup_dec_amd64(uint32_t rk[], const uint32_t cipherKey[],
    int keyBits)
{
	/*
	 * Dispatch on the AES key size, expand the user key into the
	 * decryption key schedule rk[], and report the matching number
	 * of cipher rounds.  An unsupported size performs no expansion
	 * and yields 0.
	 *
	 * The casts below keep the const qualifier: aes_decrypt_key128/
	 * 192/256 take a const unsigned char *, so there is no need to
	 * cast constness away from the caller's key.
	 */
	switch (keyBits) {
	case 128:
		aes_decrypt_key128((const unsigned char *)cipherKey, rk);
		return (10);
	case 192:
		aes_decrypt_key192((const unsigned char *)cipherKey, rk);
		return (12);
	case 256:
		aes_decrypt_key256((const unsigned char *)cipherKey, rk);
		return (14);
	default: /* should never get here */
		break;
	}
	return (0);
}

View File

@ -0,0 +1,770 @@
/*
* ---------------------------------------------------------------------------
* Copyright (c) 1998-2007, Brian Gladman, Worcester, UK. All rights reserved.
*
* LICENSE TERMS
*
* The free distribution and use of this software is allowed (with or without
* changes) provided that:
*
* 1. source code distributions include the above copyright notice, this
* list of conditions and the following disclaimer;
*
* 2. binary distributions include the above copyright notice, this list
* of conditions and the following disclaimer in their documentation;
*
* 3. the name of the copyright holder is not used to endorse products
* built using this software without specific written permission.
*
* DISCLAIMER
*
* This software is provided 'as is' with no explicit or implied warranties
* in respect of its properties, including, but not limited to, correctness
* and/or fitness for purpose.
* ---------------------------------------------------------------------------
* Issue Date: 20/12/2007
*
* This file contains the compilation options for AES (Rijndael) and code
* that is common across encryption, key scheduling and table generation.
*
* OPERATION
*
* These source code files implement the AES algorithm Rijndael designed by
* Joan Daemen and Vincent Rijmen. This version is designed for the standard
* block size of 16 bytes and for key sizes of 128, 192 and 256 bits (16, 24
* and 32 bytes).
*
* This version is designed for flexibility and speed using operations on
* 32-bit words rather than operations on bytes. It can be compiled with
* either big or little endian internal byte order but is faster when the
* native byte order for the processor is used.
*
* THE CIPHER INTERFACE
*
* The cipher interface is implemented as an array of bytes in which lower
* AES bit sequence indexes map to higher numeric significance within bytes.
*/
/*
* OpenSolaris changes
* 1. Added __cplusplus and _AESTAB_H header guards
* 2. Added header files sys/types.h and aes_impl.h
* 3. Added defines for AES_ENCRYPT, AES_DECRYPT, AES_REV_DKS, and ASM_AMD64_C
* 4. Moved defines for IS_BIG_ENDIAN, IS_LITTLE_ENDIAN, PLATFORM_BYTE_ORDER
* from brg_endian.h
* 5. Undefined VIA_ACE_POSSIBLE and ASSUME_VIA_ACE_PRESENT
* 6. Changed uint_8t and uint_32t to uint8_t and uint32_t
* 7. Defined aes_sw32 as htonl() for byte swapping
* 8. Cstyled and hdrchk code
*
*/
#ifndef _AESOPT_H
#define _AESOPT_H
#ifdef __cplusplus
extern "C" {
#endif
#include <sys/zfs_context.h>
#include <aes/aes_impl.h>
/* SUPPORT FEATURES */
#define AES_ENCRYPT /* if support for encryption is needed */
#define AES_DECRYPT /* if support for decryption is needed */
/* PLATFORM-SPECIFIC FEATURES */
#define IS_BIG_ENDIAN 4321 /* byte 0 is most significant (mc68k) */
#define IS_LITTLE_ENDIAN 1234 /* byte 0 is least significant (i386) */
#define PLATFORM_BYTE_ORDER IS_LITTLE_ENDIAN
#define AES_REV_DKS /* define to reverse decryption key schedule */
/*
* CONFIGURATION - THE USE OF DEFINES
* Later in this section there are a number of defines that control the
* operation of the code. In each section, the purpose of each define is
* explained so that the relevant form can be included or excluded by
* setting either 1's or 0's respectively on the branches of the related
* #if clauses. The following local defines should not be changed.
*/
#define ENCRYPTION_IN_C 1
#define DECRYPTION_IN_C 2
#define ENC_KEYING_IN_C 4
#define DEC_KEYING_IN_C 8
#define NO_TABLES 0
#define ONE_TABLE 1
#define FOUR_TABLES 4
#define NONE 0
#define PARTIAL 1
#define FULL 2
/* --- START OF USER CONFIGURED OPTIONS --- */
/*
* 1. BYTE ORDER WITHIN 32 BIT WORDS
*
* The fundamental data processing units in Rijndael are 8-bit bytes. The
* input, output and key input are all enumerated arrays of bytes in which
* bytes are numbered starting at zero and increasing to one less than the
* number of bytes in the array in question. This enumeration is only used
* for naming bytes and does not imply any adjacency or order relationship
* from one byte to another. When these inputs and outputs are considered
* as bit sequences, bits 8*n to 8*n+7 of the bit sequence are mapped to
* byte[n] with bit 8n+i in the sequence mapped to bit 7-i within the byte.
* In this implementation bits are numbered from 0 to 7 starting at the
* numerically least significant end of each byte. Bit n represents 2^n.
*
* However, Rijndael can be implemented more efficiently using 32-bit
* words by packing bytes into words so that bytes 4*n to 4*n+3 are placed
* into word[n]. While in principle these bytes can be assembled into words
* in any positions, this implementation only supports the two formats in
* which bytes in adjacent positions within words also have adjacent byte
* numbers. This order is called big-endian if the lowest numbered bytes
* in words have the highest numeric significance and little-endian if the
* opposite applies.
*
* This code can work in either order irrespective of the order used by the
* machine on which it runs. Normally the internal byte order will be set
* to the order of the processor on which the code is to be run but this
* define can be used to reverse this in special situations
*
* WARNING: Assembler code versions rely on PLATFORM_BYTE_ORDER being set.
* This define will hence be redefined later (in section 4) if necessary
*/
#if 1
#define ALGORITHM_BYTE_ORDER PLATFORM_BYTE_ORDER
#elif 0
#define ALGORITHM_BYTE_ORDER IS_LITTLE_ENDIAN
#elif 0
#define ALGORITHM_BYTE_ORDER IS_BIG_ENDIAN
#else
#error The algorithm byte order is not defined
#endif
/* 2. VIA ACE SUPPORT */
#if defined(__GNUC__) && defined(__i386__) || \
defined(_WIN32) && defined(_M_IX86) && \
!(defined(_WIN64) || defined(_WIN32_WCE) || \
defined(_MSC_VER) && (_MSC_VER <= 800))
#define VIA_ACE_POSSIBLE
#endif
/*
* Define this option if support for the VIA ACE is required. This uses
* inline assembler instructions and is only implemented for the Microsoft,
* Intel and GCC compilers. If VIA ACE is known to be present, then defining
* ASSUME_VIA_ACE_PRESENT will remove the ordinary encryption/decryption
* code. If USE_VIA_ACE_IF_PRESENT is defined then VIA ACE will be used if
* it is detected (both present and enabled) but the normal AES code will
* also be present.
*
* When VIA ACE is to be used, all AES encryption contexts MUST be 16 byte
* aligned; other input/output buffers do not need to be 16 byte aligned
* but there are very large performance gains if this can be arranged.
* VIA ACE also requires the decryption key schedule to be in reverse
* order (which later checks below ensure).
*/
/* VIA ACE is not used here for OpenSolaris: */
#undef VIA_ACE_POSSIBLE
#undef ASSUME_VIA_ACE_PRESENT
#if 0 && defined(VIA_ACE_POSSIBLE) && !defined(USE_VIA_ACE_IF_PRESENT)
#define USE_VIA_ACE_IF_PRESENT
#endif
#if 0 && defined(VIA_ACE_POSSIBLE) && !defined(ASSUME_VIA_ACE_PRESENT)
#define ASSUME_VIA_ACE_PRESENT
#endif
/*
* 3. ASSEMBLER SUPPORT
*
* This define (which can be on the command line) enables the use of the
* assembler code routines for encryption, decryption and key scheduling
* as follows:
*
 * ASM_X86_V1C uses the assembler (aes_x86_v1.asm) with large tables for
 * encryption and decryption but with key scheduling in C
 * ASM_X86_V2 uses assembler (aes_x86_v2.asm) with compressed tables for
 * encryption, decryption and key scheduling
 * ASM_X86_V2C uses assembler (aes_x86_v2.asm) with compressed tables for
 * encryption and decryption but with key scheduling in C
 * ASM_AMD64_C uses assembler (aes_amd64.asm) with compressed tables for
 * encryption and decryption but with key scheduling in C
*
* Change one 'if 0' below to 'if 1' to select the version or define
* as a compilation option.
*/
#if 0 && !defined(ASM_X86_V1C)
#define ASM_X86_V1C
#elif 0 && !defined(ASM_X86_V2)
#define ASM_X86_V2
#elif 0 && !defined(ASM_X86_V2C)
#define ASM_X86_V2C
#elif 1 && !defined(ASM_AMD64_C)
#define ASM_AMD64_C
#endif
#if (defined(ASM_X86_V1C) || defined(ASM_X86_V2) || defined(ASM_X86_V2C)) && \
!defined(_M_IX86) || defined(ASM_AMD64_C) && !defined(_M_X64) && \
!defined(__amd64)
#error Assembler code is only available for x86 and AMD64 systems
#endif
/*
* 4. FAST INPUT/OUTPUT OPERATIONS.
*
* On some machines it is possible to improve speed by transferring the
* bytes in the input and output arrays to and from the internal 32-bit
* variables by addressing these arrays as if they are arrays of 32-bit
* words. On some machines this will always be possible but there may
* be a large performance penalty if the byte arrays are not aligned on
* the normal word boundaries. On other machines this technique will
* lead to memory access errors when such 32-bit word accesses are not
* properly aligned. The option SAFE_IO avoids such problems but will
* often be slower on those machines that support misaligned access
* (especially so if care is taken to align the input and output byte
* arrays on 32-bit word boundaries). If SAFE_IO is not defined it is
* assumed that access to byte arrays as if they are arrays of 32-bit
* words will not cause problems when such accesses are misaligned.
*/
#if 1 && !defined(_MSC_VER)
#define SAFE_IO
#endif
/*
* 5. LOOP UNROLLING
*
* The code for encryption and decryption cycles through a number of rounds
* that can be implemented either in a loop or by expanding the code into a
* long sequence of instructions, the latter producing a larger program but
* one that will often be much faster. The latter is called loop unrolling.
* There are also potential speed advantages in expanding two iterations in
* a loop with half the number of iterations, which is called partial loop
* unrolling. The following options allow partial or full loop unrolling
* to be set independently for encryption and decryption
*/
#if 1
#define ENC_UNROLL FULL
#elif 0
#define ENC_UNROLL PARTIAL
#else
#define ENC_UNROLL NONE
#endif
#if 1
#define DEC_UNROLL FULL
#elif 0
#define DEC_UNROLL PARTIAL
#else
#define DEC_UNROLL NONE
#endif
#if 1
#define ENC_KS_UNROLL
#endif
#if 1
#define DEC_KS_UNROLL
#endif
/*
* 6. FAST FINITE FIELD OPERATIONS
*
* If this section is included, tables are used to provide faster finite
* field arithmetic. This has no effect if FIXED_TABLES is defined.
*/
#if 1
#define FF_TABLES
#endif
/*
* 7. INTERNAL STATE VARIABLE FORMAT
*
 * The internal state of Rijndael is stored in a number of local 32-bit
 * word variables which can be defined either as an array or as individually
 * named variables. Include this section if you want to store these local
 * variables in arrays. Otherwise individual local variables will be used.
*/
#if 1
#define ARRAYS
#endif
/*
* 8. FIXED OR DYNAMIC TABLES
*
* When this section is included the tables used by the code are compiled
* statically into the binary file. Otherwise the subroutine aes_init()
* must be called to compute them before the code is first used.
*/
#if 1 && !(defined(_MSC_VER) && (_MSC_VER <= 800))
#define FIXED_TABLES
#endif
/*
* 9. MASKING OR CASTING FROM LONGER VALUES TO BYTES
*
* In some systems it is better to mask longer values to extract bytes
* rather than using a cast. This option allows this choice.
*/
#if 0
#define to_byte(x) ((uint8_t)(x))
#else
#define to_byte(x) ((x) & 0xff)
#endif
/*
* 10. TABLE ALIGNMENT
*
* On some systems speed will be improved by aligning the AES large lookup
* tables on particular boundaries. This define should be set to a power of
* two giving the desired alignment. It can be left undefined if alignment
 * is not needed. This option is specific to the Microsoft VC++ compiler -
* it seems to sometimes cause trouble for the VC++ version 6 compiler.
*/
#if 1 && defined(_MSC_VER) && (_MSC_VER >= 1300)
#define TABLE_ALIGN 32
#endif
/*
* 11. REDUCE CODE AND TABLE SIZE
 *
 * This replaces some expanded macros with function calls if ASM_X86_V2 or
 * ASM_X86_V2C are defined
*/
#if 1 && (defined(ASM_X86_V2) || defined(ASM_X86_V2C))
#define REDUCE_CODE_SIZE
#endif
/*
* 12. TABLE OPTIONS
*
* This cipher proceeds by repeating in a number of cycles known as rounds
* which are implemented by a round function which is optionally be speeded
* up using tables. The basic tables are 256 32-bit words, with either
* one or four tables being required for each round function depending on
* how much speed is required. Encryption and decryption round functions
* are different and the last encryption and decryption round functions are
* different again making four different round functions in all.
*
* This means that:
* 1. Normal encryption and decryption rounds can each use either 0, 1
* or 4 tables and table spaces of 0, 1024 or 4096 bytes each.
* 2. The last encryption and decryption rounds can also use either 0, 1
* or 4 tables and table spaces of 0, 1024 or 4096 bytes each.
*
* Include or exclude the appropriate definitions below to set the number
* of tables used by this implementation.
*/
#if 1 /* set tables for the normal encryption round */
#define ENC_ROUND FOUR_TABLES
#elif 0
#define ENC_ROUND ONE_TABLE
#else
#define ENC_ROUND NO_TABLES
#endif
#if 1 /* set tables for the last encryption round */
#define LAST_ENC_ROUND FOUR_TABLES
#elif 0
#define LAST_ENC_ROUND ONE_TABLE
#else
#define LAST_ENC_ROUND NO_TABLES
#endif
#if 1 /* set tables for the normal decryption round */
#define DEC_ROUND FOUR_TABLES
#elif 0
#define DEC_ROUND ONE_TABLE
#else
#define DEC_ROUND NO_TABLES
#endif
#if 1 /* set tables for the last decryption round */
#define LAST_DEC_ROUND FOUR_TABLES
#elif 0
#define LAST_DEC_ROUND ONE_TABLE
#else
#define LAST_DEC_ROUND NO_TABLES
#endif
/*
* The decryption key schedule can be speeded up with tables in the same
* way that the round functions can. Include or exclude the following
* defines to set this requirement.
*/
#if 1
#define KEY_SCHED FOUR_TABLES
#elif 0
#define KEY_SCHED ONE_TABLE
#else
#define KEY_SCHED NO_TABLES
#endif
/* ---- END OF USER CONFIGURED OPTIONS ---- */
/* VIA ACE support is only available for VC++ and GCC */
#if !defined(_MSC_VER) && !defined(__GNUC__)
#if defined(ASSUME_VIA_ACE_PRESENT)
#undef ASSUME_VIA_ACE_PRESENT
#endif
#if defined(USE_VIA_ACE_IF_PRESENT)
#undef USE_VIA_ACE_IF_PRESENT
#endif
#endif
#if defined(ASSUME_VIA_ACE_PRESENT) && !defined(USE_VIA_ACE_IF_PRESENT)
#define USE_VIA_ACE_IF_PRESENT
#endif
#if defined(USE_VIA_ACE_IF_PRESENT) && !defined(AES_REV_DKS)
#define AES_REV_DKS
#endif
/* Assembler support requires the use of platform byte order */
#if (defined(ASM_X86_V1C) || defined(ASM_X86_V2C) || defined(ASM_AMD64_C)) && \
(ALGORITHM_BYTE_ORDER != PLATFORM_BYTE_ORDER)
#undef ALGORITHM_BYTE_ORDER
#define ALGORITHM_BYTE_ORDER PLATFORM_BYTE_ORDER
#endif
/*
* In this implementation the columns of the state array are each held in
* 32-bit words. The state array can be held in various ways: in an array
* of words, in a number of individual word variables or in a number of
* processor registers. The following define maps a variable name x and
* a column number c to the way the state array variable is to be held.
* The first define below maps the state into an array x[c] whereas the
* second form maps the state into a number of individual variables x0,
* x1, etc. Another form could map individual state columns to machine
* register names.
*/
#if defined(ARRAYS)
#define s(x, c) x[c]
#else
#define s(x, c) x##c
#endif
/*
* This implementation provides subroutines for encryption, decryption
* and for setting the three key lengths (separately) for encryption
* and decryption. Since not all functions are needed, masks are set
* up here to determine which will be implemented in C
*/
#if !defined(AES_ENCRYPT)
#define EFUNCS_IN_C 0
#elif defined(ASSUME_VIA_ACE_PRESENT) || defined(ASM_X86_V1C) || \
defined(ASM_X86_V2C) || defined(ASM_AMD64_C)
#define EFUNCS_IN_C ENC_KEYING_IN_C
#elif !defined(ASM_X86_V2)
#define EFUNCS_IN_C (ENCRYPTION_IN_C | ENC_KEYING_IN_C)
#else
#define EFUNCS_IN_C 0
#endif
#if !defined(AES_DECRYPT)
#define DFUNCS_IN_C 0
#elif defined(ASSUME_VIA_ACE_PRESENT) || defined(ASM_X86_V1C) || \
defined(ASM_X86_V2C) || defined(ASM_AMD64_C)
#define DFUNCS_IN_C DEC_KEYING_IN_C
#elif !defined(ASM_X86_V2)
#define DFUNCS_IN_C (DECRYPTION_IN_C | DEC_KEYING_IN_C)
#else
#define DFUNCS_IN_C 0
#endif
#define FUNCS_IN_C (EFUNCS_IN_C | DFUNCS_IN_C)
/* END OF CONFIGURATION OPTIONS */
/* Disable or report errors on some combinations of options */
#if ENC_ROUND == NO_TABLES && LAST_ENC_ROUND != NO_TABLES
#undef LAST_ENC_ROUND
#define LAST_ENC_ROUND NO_TABLES
#elif ENC_ROUND == ONE_TABLE && LAST_ENC_ROUND == FOUR_TABLES
#undef LAST_ENC_ROUND
#define LAST_ENC_ROUND ONE_TABLE
#endif
#if ENC_ROUND == NO_TABLES && ENC_UNROLL != NONE
#undef ENC_UNROLL
#define ENC_UNROLL NONE
#endif
#if DEC_ROUND == NO_TABLES && LAST_DEC_ROUND != NO_TABLES
#undef LAST_DEC_ROUND
#define LAST_DEC_ROUND NO_TABLES
#elif DEC_ROUND == ONE_TABLE && LAST_DEC_ROUND == FOUR_TABLES
#undef LAST_DEC_ROUND
#define LAST_DEC_ROUND ONE_TABLE
#endif
#if DEC_ROUND == NO_TABLES && DEC_UNROLL != NONE
#undef DEC_UNROLL
#define DEC_UNROLL NONE
#endif
#if (ALGORITHM_BYTE_ORDER == IS_LITTLE_ENDIAN)
#define aes_sw32 htonl
#elif defined(bswap32)
#define aes_sw32 bswap32
#elif defined(bswap_32)
#define aes_sw32 bswap_32
#else
#define brot(x, n) (((uint32_t)(x) << (n)) | ((uint32_t)(x) >> (32 - (n))))
#define aes_sw32(x) ((brot((x), 8) & 0x00ff00ff) | (brot((x), 24) & 0xff00ff00))
#endif
/*
* upr(x, n): rotates bytes within words by n positions, moving bytes to
* higher index positions with wrap around into low positions
* ups(x, n): moves bytes by n positions to higher index positions in
* words but without wrap around
* bval(x, n): extracts a byte from a word
*
* WARNING: The definitions given here are intended only for use with
* unsigned variables and with shift counts that are compile
* time constants
*/
#if (ALGORITHM_BYTE_ORDER == IS_LITTLE_ENDIAN)
#define upr(x, n) (((uint32_t)(x) << (8 * (n))) | \
((uint32_t)(x) >> (32 - 8 * (n))))
#define ups(x, n) ((uint32_t)(x) << (8 * (n)))
#define bval(x, n) to_byte((x) >> (8 * (n)))
#define bytes2word(b0, b1, b2, b3) \
(((uint32_t)(b3) << 24) | ((uint32_t)(b2) << 16) | \
((uint32_t)(b1) << 8) | (b0))
#endif
#if (ALGORITHM_BYTE_ORDER == IS_BIG_ENDIAN)
#define upr(x, n) (((uint32_t)(x) >> (8 * (n))) | \
((uint32_t)(x) << (32 - 8 * (n))))
#define ups(x, n) ((uint32_t)(x) >> (8 * (n)))
#define bval(x, n) to_byte((x) >> (24 - 8 * (n)))
#define bytes2word(b0, b1, b2, b3) \
(((uint32_t)(b0) << 24) | ((uint32_t)(b1) << 16) | \
((uint32_t)(b2) << 8) | (b3))
#endif
#if defined(SAFE_IO)
#define word_in(x, c) bytes2word(((const uint8_t *)(x) + 4 * c)[0], \
((const uint8_t *)(x) + 4 * c)[1], \
((const uint8_t *)(x) + 4 * c)[2], \
((const uint8_t *)(x) + 4 * c)[3])
#define word_out(x, c, v) { ((uint8_t *)(x) + 4 * c)[0] = bval(v, 0); \
((uint8_t *)(x) + 4 * c)[1] = bval(v, 1); \
((uint8_t *)(x) + 4 * c)[2] = bval(v, 2); \
((uint8_t *)(x) + 4 * c)[3] = bval(v, 3); }
#elif (ALGORITHM_BYTE_ORDER == PLATFORM_BYTE_ORDER)
#define word_in(x, c) (*((uint32_t *)(x) + (c)))
#define word_out(x, c, v) (*((uint32_t *)(x) + (c)) = (v))
#else
#define word_in(x, c) aes_sw32(*((uint32_t *)(x) + (c)))
#define word_out(x, c, v) (*((uint32_t *)(x) + (c)) = aes_sw32(v))
#endif
/* the finite field modular polynomial and elements */
#define WPOLY 0x011b
#define BPOLY 0x1b
/* multiply four bytes in GF(2^8) by 'x' {02} in parallel */
#define m1 0x80808080
#define m2 0x7f7f7f7f
#define gf_mulx(x) ((((x) & m2) << 1) ^ ((((x) & m1) >> 7) * BPOLY))
/*
* The following defines provide alternative definitions of gf_mulx that might
* give improved performance if a fast 32-bit multiply is not available. Note
* that a temporary variable u needs to be defined where gf_mulx is used.
*
* #define gf_mulx(x) (u = (x) & m1, u |= (u >> 1), ((x) & m2) << 1) ^ \
* ((u >> 3) | (u >> 6))
* #define m4 (0x01010101 * BPOLY)
* #define gf_mulx(x) (u = (x) & m1, ((x) & m2) << 1) ^ ((u - (u >> 7)) \
* & m4)
*/
/* Work out which tables are needed for the different options */
#if defined(ASM_X86_V1C)
#if defined(ENC_ROUND)
#undef ENC_ROUND
#endif
#define ENC_ROUND FOUR_TABLES
#if defined(LAST_ENC_ROUND)
#undef LAST_ENC_ROUND
#endif
#define LAST_ENC_ROUND FOUR_TABLES
#if defined(DEC_ROUND)
#undef DEC_ROUND
#endif
#define DEC_ROUND FOUR_TABLES
#if defined(LAST_DEC_ROUND)
#undef LAST_DEC_ROUND
#endif
#define LAST_DEC_ROUND FOUR_TABLES
#if defined(KEY_SCHED)
#undef KEY_SCHED
#define KEY_SCHED FOUR_TABLES
#endif
#endif
#if (FUNCS_IN_C & ENCRYPTION_IN_C) || defined(ASM_X86_V1C)
#if ENC_ROUND == ONE_TABLE
#define FT1_SET
#elif ENC_ROUND == FOUR_TABLES
#define FT4_SET
#else
#define SBX_SET
#endif
#if LAST_ENC_ROUND == ONE_TABLE
#define FL1_SET
#elif LAST_ENC_ROUND == FOUR_TABLES
#define FL4_SET
#elif !defined(SBX_SET)
#define SBX_SET
#endif
#endif
#if (FUNCS_IN_C & DECRYPTION_IN_C) || defined(ASM_X86_V1C)
#if DEC_ROUND == ONE_TABLE
#define IT1_SET
#elif DEC_ROUND == FOUR_TABLES
#define IT4_SET
#else
#define ISB_SET
#endif
#if LAST_DEC_ROUND == ONE_TABLE
#define IL1_SET
#elif LAST_DEC_ROUND == FOUR_TABLES
#define IL4_SET
#elif !defined(ISB_SET)
#define ISB_SET
#endif
#endif
#if !(defined(REDUCE_CODE_SIZE) && (defined(ASM_X86_V2) || \
defined(ASM_X86_V2C)))
#if ((FUNCS_IN_C & ENC_KEYING_IN_C) || (FUNCS_IN_C & DEC_KEYING_IN_C))
#if KEY_SCHED == ONE_TABLE
#if !defined(FL1_SET) && !defined(FL4_SET)
#define LS1_SET
#endif
#elif KEY_SCHED == FOUR_TABLES
#if !defined(FL4_SET)
#define LS4_SET
#endif
#elif !defined(SBX_SET)
#define SBX_SET
#endif
#endif
#if (FUNCS_IN_C & DEC_KEYING_IN_C)
#if KEY_SCHED == ONE_TABLE
#define IM1_SET
#elif KEY_SCHED == FOUR_TABLES
#define IM4_SET
#elif !defined(SBX_SET)
#define SBX_SET
#endif
#endif
#endif
/* generic definitions of Rijndael macros that use tables */
/*
 * Byte-substitution helpers: each builds a 32-bit value from the four
 * bytes of word x.  vf(x, n, c) selects the source byte and rf(n, c)
 * selects the table row for byte position n of column c (bval,
 * bytes2word, upr and gf_mulx are defined earlier in this header).
 *
 *   no_table    - plain 256-byte S box, result assembled with bytes2word
 *   one_table   - one 256-entry word table; the other byte positions are
 *                 derived from the looked-up word with 'op'
 *   four_tables - four 256-entry word tables, one per byte position
 */
#define no_table(x, box, vf, rf, c) bytes2word(\
box[bval(vf(x, 0, c), rf(0, c))], \
box[bval(vf(x, 1, c), rf(1, c))], \
box[bval(vf(x, 2, c), rf(2, c))], \
box[bval(vf(x, 3, c), rf(3, c))])
#define one_table(x, op, tab, vf, rf, c) \
(tab[bval(vf(x, 0, c), rf(0, c))] \
^ op(tab[bval(vf(x, 1, c), rf(1, c))], 1) \
^ op(tab[bval(vf(x, 2, c), rf(2, c))], 2) \
^ op(tab[bval(vf(x, 3, c), rf(3, c))], 3))
#define four_tables(x, tab, vf, rf, c) \
(tab[0][bval(vf(x, 0, c), rf(0, c))] \
^ tab[1][bval(vf(x, 1, c), rf(1, c))] \
^ tab[2][bval(vf(x, 2, c), rf(2, c))] \
^ tab[3][bval(vf(x, 3, c), rf(3, c))])
/*
 * Source/row selectors used with the macros above:
 *   vf1 - take bytes directly from x
 *   rf1 - table row follows the byte position r
 *   rf2 - row offset by the column number c modulo 4 (used by ls_box for
 *         the key-schedule byte rotation)
 */
#define vf1(x, r, c) (x)
#define rf1(r, c) (r)
#define rf2(r, c) ((8+r-c)&3)
/*
 * Perform forward and inverse column mix operation on four bytes in long word
 * x in parallel. NOTE: x must be a simple variable, NOT an expression in
 * these macros.
 */
#if !(defined(REDUCE_CODE_SIZE) && (defined(ASM_X86_V2) || \
defined(ASM_X86_V2C)))
#if defined(FM4_SET) /* not currently used */
#define fwd_mcol(x) four_tables(x, t_use(f, m), vf1, rf1, 0)
#elif defined(FM1_SET) /* not currently used */
#define fwd_mcol(x) one_table(x, upr, t_use(f, m), vf1, rf1, 0)
#else
/* Arithmetic fallback; dec_fmvars declares the scratch fwd_mcol needs. */
#define dec_fmvars uint32_t g2
#define fwd_mcol(x) (g2 = gf_mulx(x), g2 ^ upr((x) ^ g2, 3) ^ \
upr((x), 2) ^ upr((x), 1))
#endif
#if defined(IM4_SET)
#define inv_mcol(x) four_tables(x, t_use(i, m), vf1, rf1, 0)
#elif defined(IM1_SET)
#define inv_mcol(x) one_table(x, upr, t_use(i, m), vf1, rf1, 0)
#else
/* Arithmetic fallback; dec_imvars declares the scratch inv_mcol needs. */
#define dec_imvars uint32_t g2, g4, g9
#define inv_mcol(x) (g2 = gf_mulx(x), g4 = gf_mulx(g2), g9 = \
(x) ^ gf_mulx(g4), g4 ^= g9, \
(x) ^ g2 ^ g4 ^ upr(g2 ^ g9, 3) ^ \
upr(g4, 2) ^ upr(g9, 1))
#endif
/*
 * ls_box: substitute all four bytes of x through the S box with the
 * per-column row rotation rf2 (key schedule); the variant picked depends
 * on which lookup tables were compiled in (see aestab.h).
 */
#if defined(FL4_SET)
#define ls_box(x, c) four_tables(x, t_use(f, l), vf1, rf2, c)
#elif defined(LS4_SET)
#define ls_box(x, c) four_tables(x, t_use(l, s), vf1, rf2, c)
#elif defined(FL1_SET)
#define ls_box(x, c) one_table(x, upr, t_use(f, l), vf1, rf2, c)
#elif defined(LS1_SET)
#define ls_box(x, c) one_table(x, upr, t_use(l, s), vf1, rf2, c)
#else
#define ls_box(x, c) no_table(x, t_use(s, box), vf1, rf2, c)
#endif
#endif
/* The x86 V1 assembler decryption path requires the inverse S box table. */
#if defined(ASM_X86_V1C) && defined(AES_DECRYPT) && !defined(ISB_SET)
#define ISB_SET
#endif
#ifdef __cplusplus
}
#endif
#endif /* _AESOPT_H */

View File

@ -0,0 +1,165 @@
/*
 * ---------------------------------------------------------------------------
 * Copyright (c) 1998-2007, Brian Gladman, Worcester, UK. All rights reserved.
 *
 * LICENSE TERMS
 *
 * The free distribution and use of this software is allowed (with or without
 * changes) provided that:
 *
 * 1. source code distributions include the above copyright notice, this
 * list of conditions and the following disclaimer;
 *
 * 2. binary distributions include the above copyright notice, this list
 * of conditions and the following disclaimer in their documentation;
 *
 * 3. the name of the copyright holder is not used to endorse products
 * built using this software without specific written permission.
 *
 * DISCLAIMER
 *
 * This software is provided 'as is' with no explicit or implied warranties
 * in respect of its properties, including, but not limited to, correctness
 * and/or fitness for purpose.
 * ---------------------------------------------------------------------------
 * Issue Date: 20/12/2007
 *
 * This file contains the code for declaring the tables needed to implement
 * AES. The file aesopt.h is assumed to be included before this header file.
 * If there are no global variables, the definitions here can be used to put
 * the AES tables in a structure so that a pointer can then be added to the
 * AES context to pass them to the AES routines that need them. If this
 * facility is used, the calling program has to ensure that this pointer is
 * managed appropriately. In particular, the value of the t_dec(in, it) item
 * in the table structure must be set to zero in order to ensure that the
 * tables are initialised. In practice the three code sequences in aeskey.c
 * that control the calls to aes_init() and the aes_init() routine itself will
 * have to be changed for a specific implementation. If global variables are
 * available it will generally be preferable to use them with the precomputed
 * FIXED_TABLES option that uses static global tables.
 *
 * The following defines can be used to control the way the tables
 * are defined, initialised and used in embedded environments that
 * require special features for these purposes
 *
 * the 't_dec' construction is used to declare fixed table arrays
 * the 't_set' construction is used to set fixed table values
 * the 't_use' construction is used to access fixed table values
 *
 * 256 byte tables:
 *
 * t_xxx(s, box) => forward S box
 * t_xxx(i, box) => inverse S box
 *
 * 256 32-bit word OR 4 x 256 32-bit word tables:
 *
 * t_xxx(f, n) => forward normal round
 * t_xxx(f, l) => forward last round
 * t_xxx(i, n) => inverse normal round
 * t_xxx(i, l) => inverse last round
 * t_xxx(l, s) => key schedule table
 * t_xxx(i, m) => key schedule table
 *
 * Other variables and tables:
 *
 * t_xxx(r, c) => the rcon table
 */
/*
 * OpenSolaris OS modifications
 *
 * 1. Added __cplusplus and _AESTAB_H header guards
 * 2. Added header file sys/types.h
 * 3. Remove code defined for _MSC_VER
 * 4. Changed all variables to "static const"
 * 5. Changed uint_8t and uint_32t to uint8_t and uint32_t
 * 6. Cstyled and hdrchk code
 */
#ifndef _AESTAB_H
#define _AESTAB_H
#ifdef __cplusplus
extern "C" {
#endif
#include <sys/types.h>
/*
 * Token-pasting helpers: declare (t_dec), define (t_set) and reference
 * (t_use) the table objects named in the scheme documented above.  All
 * three expand to the same identifier here.
 */
#define t_dec(m, n) t_##m##n
#define t_set(m, n) t_##m##n
#define t_use(m, n) t_##m##n
/*
 * With DO_TABLES and FIXED_TABLES the d_1/d_4 declarators emit fully
 * initialised tables from the b(e...) generator macros supplied by
 * aesopt.h; otherwise the tables are declared without initialisers and
 * are presumably filled at run time via aes_init() (see the notes above
 * — TODO confirm against aeskey.c).
 */
#if defined(DO_TABLES) && defined(FIXED_TABLES)
#define d_1(t, n, b, e) static const t n[256] = b(e)
#define d_4(t, n, b, e, f, g, h) static const t n[4][256] = \
{b(e), b(f), b(g), b(h)}
static const uint32_t t_dec(r, c)[RC_LENGTH] = rc_data(w0);
#else
#define d_1(t, n, b, e) static const t n[256]
#define d_4(t, n, b, e, f, g, h) static const t n[4][256]
/* rcon table: zero here; expected to be set up during initialisation */
static const uint32_t t_dec(r, c)[RC_LENGTH];
#endif
/* forward S box (256 bytes) */
#if defined(SBX_SET)
d_1(uint8_t, t_dec(s, box), sb_data, h0);
#endif
/* inverse S box (256 bytes) */
#if defined(ISB_SET)
d_1(uint8_t, t_dec(i, box), isb_data, h0);
#endif
/* forward normal round: one table / four tables */
#if defined(FT1_SET)
d_1(uint32_t, t_dec(f, n), sb_data, u0);
#endif
#if defined(FT4_SET)
d_4(uint32_t, t_dec(f, n), sb_data, u0, u1, u2, u3);
#endif
/* forward last round: one table / four tables */
#if defined(FL1_SET)
d_1(uint32_t, t_dec(f, l), sb_data, w0);
#endif
#if defined(FL4_SET)
d_4(uint32_t, t_dec(f, l), sb_data, w0, w1, w2, w3);
#endif
/* inverse normal round: one table / four tables */
#if defined(IT1_SET)
d_1(uint32_t, t_dec(i, n), isb_data, v0);
#endif
#if defined(IT4_SET)
d_4(uint32_t, t_dec(i, n), isb_data, v0, v1, v2, v3);
#endif
/* inverse last round: one table / four tables */
#if defined(IL1_SET)
d_1(uint32_t, t_dec(i, l), isb_data, w0);
#endif
#if defined(IL4_SET)
d_4(uint32_t, t_dec(i, l), isb_data, w0, w1, w2, w3);
#endif
/* key schedule S-box tables; redundant (and dropped) when the forward
 * last-round tables above already exist with identical content */
#if defined(LS1_SET)
#if defined(FL1_SET)
#undef LS1_SET
#else
d_1(uint32_t, t_dec(l, s), sb_data, w0);
#endif
#endif
#if defined(LS4_SET)
#if defined(FL4_SET)
#undef LS4_SET
#else
d_4(uint32_t, t_dec(l, s), sb_data, w0, w1, w2, w3);
#endif
#endif
/* key schedule inverse mix-column tables: one table / four tables */
#if defined(IM1_SET)
d_1(uint32_t, t_dec(i, m), mm_data, v0);
#endif
#if defined(IM4_SET)
d_4(uint32_t, t_dec(i, m), mm_data, v0, v1, v2, v3);
#endif
#ifdef __cplusplus
}
#endif
#endif /* _AESTAB_H */

View File

@ -0,0 +1,594 @@
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
#ifndef _AESTAB2_H
#define _AESTAB2_H
#ifdef __cplusplus
extern "C" {
#endif
/*
 * To create this file for OpenSolaris:
 * 1. Compile and run tablegen.c, from aes-src-04-03-08.zip,
 *    after defining ASM_AMD64_C
 * 2. mv aestab2.c aestab2.h
 * 3. Add __cplusplus and _AESTAB2_H header guards
 * 3. Add #include <aes_impl.h>
 * 4. Change "uint_32t" to "uint32_t"
 * 5. Change all variables to "static const"
 * 6. Cstyle and hdrchk this file
 */
#include <aes/aes_impl.h>
/*
 * AES round-constant (rcon) table: t_rc[i] holds x**i in GF(2^8) under
 * the AES reduction polynomial — note the wrap from 0x80 to 0x1b below.
 * Used by the key-expansion code (see FIPS-197).
 */
static const uint32_t t_rc[RC_LENGTH] =
{
0x00000001, 0x00000002, 0x00000004, 0x00000008,
0x00000010, 0x00000020, 0x00000040, 0x00000080,
0x0000001b, 0x00000036
};
/*
 * Key-schedule S-box tables: t_ls[i][j] is the AES forward S-box value of
 * byte j placed in byte position i (visible below: table 1 equals table 0
 * shifted left 8 bits, table 2 by 16, table 3 by 24).  XORing one lookup
 * from each table substitutes all four bytes of a 32-bit word without any
 * run-time shifting.
 */
static const uint32_t t_ls[4][256] =
{
{
0x00000063, 0x0000007c, 0x00000077, 0x0000007b,
0x000000f2, 0x0000006b, 0x0000006f, 0x000000c5,
0x00000030, 0x00000001, 0x00000067, 0x0000002b,
0x000000fe, 0x000000d7, 0x000000ab, 0x00000076,
0x000000ca, 0x00000082, 0x000000c9, 0x0000007d,
0x000000fa, 0x00000059, 0x00000047, 0x000000f0,
0x000000ad, 0x000000d4, 0x000000a2, 0x000000af,
0x0000009c, 0x000000a4, 0x00000072, 0x000000c0,
0x000000b7, 0x000000fd, 0x00000093, 0x00000026,
0x00000036, 0x0000003f, 0x000000f7, 0x000000cc,
0x00000034, 0x000000a5, 0x000000e5, 0x000000f1,
0x00000071, 0x000000d8, 0x00000031, 0x00000015,
0x00000004, 0x000000c7, 0x00000023, 0x000000c3,
0x00000018, 0x00000096, 0x00000005, 0x0000009a,
0x00000007, 0x00000012, 0x00000080, 0x000000e2,
0x000000eb, 0x00000027, 0x000000b2, 0x00000075,
0x00000009, 0x00000083, 0x0000002c, 0x0000001a,
0x0000001b, 0x0000006e, 0x0000005a, 0x000000a0,
0x00000052, 0x0000003b, 0x000000d6, 0x000000b3,
0x00000029, 0x000000e3, 0x0000002f, 0x00000084,
0x00000053, 0x000000d1, 0x00000000, 0x000000ed,
0x00000020, 0x000000fc, 0x000000b1, 0x0000005b,
0x0000006a, 0x000000cb, 0x000000be, 0x00000039,
0x0000004a, 0x0000004c, 0x00000058, 0x000000cf,
0x000000d0, 0x000000ef, 0x000000aa, 0x000000fb,
0x00000043, 0x0000004d, 0x00000033, 0x00000085,
0x00000045, 0x000000f9, 0x00000002, 0x0000007f,
0x00000050, 0x0000003c, 0x0000009f, 0x000000a8,
0x00000051, 0x000000a3, 0x00000040, 0x0000008f,
0x00000092, 0x0000009d, 0x00000038, 0x000000f5,
0x000000bc, 0x000000b6, 0x000000da, 0x00000021,
0x00000010, 0x000000ff, 0x000000f3, 0x000000d2,
0x000000cd, 0x0000000c, 0x00000013, 0x000000ec,
0x0000005f, 0x00000097, 0x00000044, 0x00000017,
0x000000c4, 0x000000a7, 0x0000007e, 0x0000003d,
0x00000064, 0x0000005d, 0x00000019, 0x00000073,
0x00000060, 0x00000081, 0x0000004f, 0x000000dc,
0x00000022, 0x0000002a, 0x00000090, 0x00000088,
0x00000046, 0x000000ee, 0x000000b8, 0x00000014,
0x000000de, 0x0000005e, 0x0000000b, 0x000000db,
0x000000e0, 0x00000032, 0x0000003a, 0x0000000a,
0x00000049, 0x00000006, 0x00000024, 0x0000005c,
0x000000c2, 0x000000d3, 0x000000ac, 0x00000062,
0x00000091, 0x00000095, 0x000000e4, 0x00000079,
0x000000e7, 0x000000c8, 0x00000037, 0x0000006d,
0x0000008d, 0x000000d5, 0x0000004e, 0x000000a9,
0x0000006c, 0x00000056, 0x000000f4, 0x000000ea,
0x00000065, 0x0000007a, 0x000000ae, 0x00000008,
0x000000ba, 0x00000078, 0x00000025, 0x0000002e,
0x0000001c, 0x000000a6, 0x000000b4, 0x000000c6,
0x000000e8, 0x000000dd, 0x00000074, 0x0000001f,
0x0000004b, 0x000000bd, 0x0000008b, 0x0000008a,
0x00000070, 0x0000003e, 0x000000b5, 0x00000066,
0x00000048, 0x00000003, 0x000000f6, 0x0000000e,
0x00000061, 0x00000035, 0x00000057, 0x000000b9,
0x00000086, 0x000000c1, 0x0000001d, 0x0000009e,
0x000000e1, 0x000000f8, 0x00000098, 0x00000011,
0x00000069, 0x000000d9, 0x0000008e, 0x00000094,
0x0000009b, 0x0000001e, 0x00000087, 0x000000e9,
0x000000ce, 0x00000055, 0x00000028, 0x000000df,
0x0000008c, 0x000000a1, 0x00000089, 0x0000000d,
0x000000bf, 0x000000e6, 0x00000042, 0x00000068,
0x00000041, 0x00000099, 0x0000002d, 0x0000000f,
0x000000b0, 0x00000054, 0x000000bb, 0x00000016
},
{
0x00006300, 0x00007c00, 0x00007700, 0x00007b00,
0x0000f200, 0x00006b00, 0x00006f00, 0x0000c500,
0x00003000, 0x00000100, 0x00006700, 0x00002b00,
0x0000fe00, 0x0000d700, 0x0000ab00, 0x00007600,
0x0000ca00, 0x00008200, 0x0000c900, 0x00007d00,
0x0000fa00, 0x00005900, 0x00004700, 0x0000f000,
0x0000ad00, 0x0000d400, 0x0000a200, 0x0000af00,
0x00009c00, 0x0000a400, 0x00007200, 0x0000c000,
0x0000b700, 0x0000fd00, 0x00009300, 0x00002600,
0x00003600, 0x00003f00, 0x0000f700, 0x0000cc00,
0x00003400, 0x0000a500, 0x0000e500, 0x0000f100,
0x00007100, 0x0000d800, 0x00003100, 0x00001500,
0x00000400, 0x0000c700, 0x00002300, 0x0000c300,
0x00001800, 0x00009600, 0x00000500, 0x00009a00,
0x00000700, 0x00001200, 0x00008000, 0x0000e200,
0x0000eb00, 0x00002700, 0x0000b200, 0x00007500,
0x00000900, 0x00008300, 0x00002c00, 0x00001a00,
0x00001b00, 0x00006e00, 0x00005a00, 0x0000a000,
0x00005200, 0x00003b00, 0x0000d600, 0x0000b300,
0x00002900, 0x0000e300, 0x00002f00, 0x00008400,
0x00005300, 0x0000d100, 0x00000000, 0x0000ed00,
0x00002000, 0x0000fc00, 0x0000b100, 0x00005b00,
0x00006a00, 0x0000cb00, 0x0000be00, 0x00003900,
0x00004a00, 0x00004c00, 0x00005800, 0x0000cf00,
0x0000d000, 0x0000ef00, 0x0000aa00, 0x0000fb00,
0x00004300, 0x00004d00, 0x00003300, 0x00008500,
0x00004500, 0x0000f900, 0x00000200, 0x00007f00,
0x00005000, 0x00003c00, 0x00009f00, 0x0000a800,
0x00005100, 0x0000a300, 0x00004000, 0x00008f00,
0x00009200, 0x00009d00, 0x00003800, 0x0000f500,
0x0000bc00, 0x0000b600, 0x0000da00, 0x00002100,
0x00001000, 0x0000ff00, 0x0000f300, 0x0000d200,
0x0000cd00, 0x00000c00, 0x00001300, 0x0000ec00,
0x00005f00, 0x00009700, 0x00004400, 0x00001700,
0x0000c400, 0x0000a700, 0x00007e00, 0x00003d00,
0x00006400, 0x00005d00, 0x00001900, 0x00007300,
0x00006000, 0x00008100, 0x00004f00, 0x0000dc00,
0x00002200, 0x00002a00, 0x00009000, 0x00008800,
0x00004600, 0x0000ee00, 0x0000b800, 0x00001400,
0x0000de00, 0x00005e00, 0x00000b00, 0x0000db00,
0x0000e000, 0x00003200, 0x00003a00, 0x00000a00,
0x00004900, 0x00000600, 0x00002400, 0x00005c00,
0x0000c200, 0x0000d300, 0x0000ac00, 0x00006200,
0x00009100, 0x00009500, 0x0000e400, 0x00007900,
0x0000e700, 0x0000c800, 0x00003700, 0x00006d00,
0x00008d00, 0x0000d500, 0x00004e00, 0x0000a900,
0x00006c00, 0x00005600, 0x0000f400, 0x0000ea00,
0x00006500, 0x00007a00, 0x0000ae00, 0x00000800,
0x0000ba00, 0x00007800, 0x00002500, 0x00002e00,
0x00001c00, 0x0000a600, 0x0000b400, 0x0000c600,
0x0000e800, 0x0000dd00, 0x00007400, 0x00001f00,
0x00004b00, 0x0000bd00, 0x00008b00, 0x00008a00,
0x00007000, 0x00003e00, 0x0000b500, 0x00006600,
0x00004800, 0x00000300, 0x0000f600, 0x00000e00,
0x00006100, 0x00003500, 0x00005700, 0x0000b900,
0x00008600, 0x0000c100, 0x00001d00, 0x00009e00,
0x0000e100, 0x0000f800, 0x00009800, 0x00001100,
0x00006900, 0x0000d900, 0x00008e00, 0x00009400,
0x00009b00, 0x00001e00, 0x00008700, 0x0000e900,
0x0000ce00, 0x00005500, 0x00002800, 0x0000df00,
0x00008c00, 0x0000a100, 0x00008900, 0x00000d00,
0x0000bf00, 0x0000e600, 0x00004200, 0x00006800,
0x00004100, 0x00009900, 0x00002d00, 0x00000f00,
0x0000b000, 0x00005400, 0x0000bb00, 0x00001600
},
{
0x00630000, 0x007c0000, 0x00770000, 0x007b0000,
0x00f20000, 0x006b0000, 0x006f0000, 0x00c50000,
0x00300000, 0x00010000, 0x00670000, 0x002b0000,
0x00fe0000, 0x00d70000, 0x00ab0000, 0x00760000,
0x00ca0000, 0x00820000, 0x00c90000, 0x007d0000,
0x00fa0000, 0x00590000, 0x00470000, 0x00f00000,
0x00ad0000, 0x00d40000, 0x00a20000, 0x00af0000,
0x009c0000, 0x00a40000, 0x00720000, 0x00c00000,
0x00b70000, 0x00fd0000, 0x00930000, 0x00260000,
0x00360000, 0x003f0000, 0x00f70000, 0x00cc0000,
0x00340000, 0x00a50000, 0x00e50000, 0x00f10000,
0x00710000, 0x00d80000, 0x00310000, 0x00150000,
0x00040000, 0x00c70000, 0x00230000, 0x00c30000,
0x00180000, 0x00960000, 0x00050000, 0x009a0000,
0x00070000, 0x00120000, 0x00800000, 0x00e20000,
0x00eb0000, 0x00270000, 0x00b20000, 0x00750000,
0x00090000, 0x00830000, 0x002c0000, 0x001a0000,
0x001b0000, 0x006e0000, 0x005a0000, 0x00a00000,
0x00520000, 0x003b0000, 0x00d60000, 0x00b30000,
0x00290000, 0x00e30000, 0x002f0000, 0x00840000,
0x00530000, 0x00d10000, 0x00000000, 0x00ed0000,
0x00200000, 0x00fc0000, 0x00b10000, 0x005b0000,
0x006a0000, 0x00cb0000, 0x00be0000, 0x00390000,
0x004a0000, 0x004c0000, 0x00580000, 0x00cf0000,
0x00d00000, 0x00ef0000, 0x00aa0000, 0x00fb0000,
0x00430000, 0x004d0000, 0x00330000, 0x00850000,
0x00450000, 0x00f90000, 0x00020000, 0x007f0000,
0x00500000, 0x003c0000, 0x009f0000, 0x00a80000,
0x00510000, 0x00a30000, 0x00400000, 0x008f0000,
0x00920000, 0x009d0000, 0x00380000, 0x00f50000,
0x00bc0000, 0x00b60000, 0x00da0000, 0x00210000,
0x00100000, 0x00ff0000, 0x00f30000, 0x00d20000,
0x00cd0000, 0x000c0000, 0x00130000, 0x00ec0000,
0x005f0000, 0x00970000, 0x00440000, 0x00170000,
0x00c40000, 0x00a70000, 0x007e0000, 0x003d0000,
0x00640000, 0x005d0000, 0x00190000, 0x00730000,
0x00600000, 0x00810000, 0x004f0000, 0x00dc0000,
0x00220000, 0x002a0000, 0x00900000, 0x00880000,
0x00460000, 0x00ee0000, 0x00b80000, 0x00140000,
0x00de0000, 0x005e0000, 0x000b0000, 0x00db0000,
0x00e00000, 0x00320000, 0x003a0000, 0x000a0000,
0x00490000, 0x00060000, 0x00240000, 0x005c0000,
0x00c20000, 0x00d30000, 0x00ac0000, 0x00620000,
0x00910000, 0x00950000, 0x00e40000, 0x00790000,
0x00e70000, 0x00c80000, 0x00370000, 0x006d0000,
0x008d0000, 0x00d50000, 0x004e0000, 0x00a90000,
0x006c0000, 0x00560000, 0x00f40000, 0x00ea0000,
0x00650000, 0x007a0000, 0x00ae0000, 0x00080000,
0x00ba0000, 0x00780000, 0x00250000, 0x002e0000,
0x001c0000, 0x00a60000, 0x00b40000, 0x00c60000,
0x00e80000, 0x00dd0000, 0x00740000, 0x001f0000,
0x004b0000, 0x00bd0000, 0x008b0000, 0x008a0000,
0x00700000, 0x003e0000, 0x00b50000, 0x00660000,
0x00480000, 0x00030000, 0x00f60000, 0x000e0000,
0x00610000, 0x00350000, 0x00570000, 0x00b90000,
0x00860000, 0x00c10000, 0x001d0000, 0x009e0000,
0x00e10000, 0x00f80000, 0x00980000, 0x00110000,
0x00690000, 0x00d90000, 0x008e0000, 0x00940000,
0x009b0000, 0x001e0000, 0x00870000, 0x00e90000,
0x00ce0000, 0x00550000, 0x00280000, 0x00df0000,
0x008c0000, 0x00a10000, 0x00890000, 0x000d0000,
0x00bf0000, 0x00e60000, 0x00420000, 0x00680000,
0x00410000, 0x00990000, 0x002d0000, 0x000f0000,
0x00b00000, 0x00540000, 0x00bb0000, 0x00160000
},
{
0x63000000, 0x7c000000, 0x77000000, 0x7b000000,
0xf2000000, 0x6b000000, 0x6f000000, 0xc5000000,
0x30000000, 0x01000000, 0x67000000, 0x2b000000,
0xfe000000, 0xd7000000, 0xab000000, 0x76000000,
0xca000000, 0x82000000, 0xc9000000, 0x7d000000,
0xfa000000, 0x59000000, 0x47000000, 0xf0000000,
0xad000000, 0xd4000000, 0xa2000000, 0xaf000000,
0x9c000000, 0xa4000000, 0x72000000, 0xc0000000,
0xb7000000, 0xfd000000, 0x93000000, 0x26000000,
0x36000000, 0x3f000000, 0xf7000000, 0xcc000000,
0x34000000, 0xa5000000, 0xe5000000, 0xf1000000,
0x71000000, 0xd8000000, 0x31000000, 0x15000000,
0x04000000, 0xc7000000, 0x23000000, 0xc3000000,
0x18000000, 0x96000000, 0x05000000, 0x9a000000,
0x07000000, 0x12000000, 0x80000000, 0xe2000000,
0xeb000000, 0x27000000, 0xb2000000, 0x75000000,
0x09000000, 0x83000000, 0x2c000000, 0x1a000000,
0x1b000000, 0x6e000000, 0x5a000000, 0xa0000000,
0x52000000, 0x3b000000, 0xd6000000, 0xb3000000,
0x29000000, 0xe3000000, 0x2f000000, 0x84000000,
0x53000000, 0xd1000000, 0x00000000, 0xed000000,
0x20000000, 0xfc000000, 0xb1000000, 0x5b000000,
0x6a000000, 0xcb000000, 0xbe000000, 0x39000000,
0x4a000000, 0x4c000000, 0x58000000, 0xcf000000,
0xd0000000, 0xef000000, 0xaa000000, 0xfb000000,
0x43000000, 0x4d000000, 0x33000000, 0x85000000,
0x45000000, 0xf9000000, 0x02000000, 0x7f000000,
0x50000000, 0x3c000000, 0x9f000000, 0xa8000000,
0x51000000, 0xa3000000, 0x40000000, 0x8f000000,
0x92000000, 0x9d000000, 0x38000000, 0xf5000000,
0xbc000000, 0xb6000000, 0xda000000, 0x21000000,
0x10000000, 0xff000000, 0xf3000000, 0xd2000000,
0xcd000000, 0x0c000000, 0x13000000, 0xec000000,
0x5f000000, 0x97000000, 0x44000000, 0x17000000,
0xc4000000, 0xa7000000, 0x7e000000, 0x3d000000,
0x64000000, 0x5d000000, 0x19000000, 0x73000000,
0x60000000, 0x81000000, 0x4f000000, 0xdc000000,
0x22000000, 0x2a000000, 0x90000000, 0x88000000,
0x46000000, 0xee000000, 0xb8000000, 0x14000000,
0xde000000, 0x5e000000, 0x0b000000, 0xdb000000,
0xe0000000, 0x32000000, 0x3a000000, 0x0a000000,
0x49000000, 0x06000000, 0x24000000, 0x5c000000,
0xc2000000, 0xd3000000, 0xac000000, 0x62000000,
0x91000000, 0x95000000, 0xe4000000, 0x79000000,
0xe7000000, 0xc8000000, 0x37000000, 0x6d000000,
0x8d000000, 0xd5000000, 0x4e000000, 0xa9000000,
0x6c000000, 0x56000000, 0xf4000000, 0xea000000,
0x65000000, 0x7a000000, 0xae000000, 0x08000000,
0xba000000, 0x78000000, 0x25000000, 0x2e000000,
0x1c000000, 0xa6000000, 0xb4000000, 0xc6000000,
0xe8000000, 0xdd000000, 0x74000000, 0x1f000000,
0x4b000000, 0xbd000000, 0x8b000000, 0x8a000000,
0x70000000, 0x3e000000, 0xb5000000, 0x66000000,
0x48000000, 0x03000000, 0xf6000000, 0x0e000000,
0x61000000, 0x35000000, 0x57000000, 0xb9000000,
0x86000000, 0xc1000000, 0x1d000000, 0x9e000000,
0xe1000000, 0xf8000000, 0x98000000, 0x11000000,
0x69000000, 0xd9000000, 0x8e000000, 0x94000000,
0x9b000000, 0x1e000000, 0x87000000, 0xe9000000,
0xce000000, 0x55000000, 0x28000000, 0xdf000000,
0x8c000000, 0xa1000000, 0x89000000, 0x0d000000,
0xbf000000, 0xe6000000, 0x42000000, 0x68000000,
0x41000000, 0x99000000, 0x2d000000, 0x0f000000,
0xb0000000, 0x54000000, 0xbb000000, 0x16000000
}
};
/*
 * Inverse MixColumns tables: t_im[i][j] is the InvMixColumns contribution
 * of byte value j in byte position i of a column word (e.g. t_im[0][1] ==
 * 0x0b0d090e, the GF(2^8) products {0b, 0d, 09, 0e} of 1; each later
 * table is the previous one rotated left by 8 bits).  XORing one lookup
 * per byte position applies the inverse column mix to a whole 32-bit
 * word; used for the decryption key schedule (t_im == t_dec(i, m)).
 */
static const uint32_t t_im[4][256] =
{
{
0x00000000, 0x0b0d090e, 0x161a121c, 0x1d171b12,
0x2c342438, 0x27392d36, 0x3a2e3624, 0x31233f2a,
0x58684870, 0x5365417e, 0x4e725a6c, 0x457f5362,
0x745c6c48, 0x7f516546, 0x62467e54, 0x694b775a,
0xb0d090e0, 0xbbdd99ee, 0xa6ca82fc, 0xadc78bf2,
0x9ce4b4d8, 0x97e9bdd6, 0x8afea6c4, 0x81f3afca,
0xe8b8d890, 0xe3b5d19e, 0xfea2ca8c, 0xf5afc382,
0xc48cfca8, 0xcf81f5a6, 0xd296eeb4, 0xd99be7ba,
0x7bbb3bdb, 0x70b632d5, 0x6da129c7, 0x66ac20c9,
0x578f1fe3, 0x5c8216ed, 0x41950dff, 0x4a9804f1,
0x23d373ab, 0x28de7aa5, 0x35c961b7, 0x3ec468b9,
0x0fe75793, 0x04ea5e9d, 0x19fd458f, 0x12f04c81,
0xcb6bab3b, 0xc066a235, 0xdd71b927, 0xd67cb029,
0xe75f8f03, 0xec52860d, 0xf1459d1f, 0xfa489411,
0x9303e34b, 0x980eea45, 0x8519f157, 0x8e14f859,
0xbf37c773, 0xb43ace7d, 0xa92dd56f, 0xa220dc61,
0xf66d76ad, 0xfd607fa3, 0xe07764b1, 0xeb7a6dbf,
0xda595295, 0xd1545b9b, 0xcc434089, 0xc74e4987,
0xae053edd, 0xa50837d3, 0xb81f2cc1, 0xb31225cf,
0x82311ae5, 0x893c13eb, 0x942b08f9, 0x9f2601f7,
0x46bde64d, 0x4db0ef43, 0x50a7f451, 0x5baafd5f,
0x6a89c275, 0x6184cb7b, 0x7c93d069, 0x779ed967,
0x1ed5ae3d, 0x15d8a733, 0x08cfbc21, 0x03c2b52f,
0x32e18a05, 0x39ec830b, 0x24fb9819, 0x2ff69117,
0x8dd64d76, 0x86db4478, 0x9bcc5f6a, 0x90c15664,
0xa1e2694e, 0xaaef6040, 0xb7f87b52, 0xbcf5725c,
0xd5be0506, 0xdeb30c08, 0xc3a4171a, 0xc8a91e14,
0xf98a213e, 0xf2872830, 0xef903322, 0xe49d3a2c,
0x3d06dd96, 0x360bd498, 0x2b1ccf8a, 0x2011c684,
0x1132f9ae, 0x1a3ff0a0, 0x0728ebb2, 0x0c25e2bc,
0x656e95e6, 0x6e639ce8, 0x737487fa, 0x78798ef4,
0x495ab1de, 0x4257b8d0, 0x5f40a3c2, 0x544daacc,
0xf7daec41, 0xfcd7e54f, 0xe1c0fe5d, 0xeacdf753,
0xdbeec879, 0xd0e3c177, 0xcdf4da65, 0xc6f9d36b,
0xafb2a431, 0xa4bfad3f, 0xb9a8b62d, 0xb2a5bf23,
0x83868009, 0x888b8907, 0x959c9215, 0x9e919b1b,
0x470a7ca1, 0x4c0775af, 0x51106ebd, 0x5a1d67b3,
0x6b3e5899, 0x60335197, 0x7d244a85, 0x7629438b,
0x1f6234d1, 0x146f3ddf, 0x097826cd, 0x02752fc3,
0x335610e9, 0x385b19e7, 0x254c02f5, 0x2e410bfb,
0x8c61d79a, 0x876cde94, 0x9a7bc586, 0x9176cc88,
0xa055f3a2, 0xab58faac, 0xb64fe1be, 0xbd42e8b0,
0xd4099fea, 0xdf0496e4, 0xc2138df6, 0xc91e84f8,
0xf83dbbd2, 0xf330b2dc, 0xee27a9ce, 0xe52aa0c0,
0x3cb1477a, 0x37bc4e74, 0x2aab5566, 0x21a65c68,
0x10856342, 0x1b886a4c, 0x069f715e, 0x0d927850,
0x64d90f0a, 0x6fd40604, 0x72c31d16, 0x79ce1418,
0x48ed2b32, 0x43e0223c, 0x5ef7392e, 0x55fa3020,
0x01b79aec, 0x0aba93e2, 0x17ad88f0, 0x1ca081fe,
0x2d83bed4, 0x268eb7da, 0x3b99acc8, 0x3094a5c6,
0x59dfd29c, 0x52d2db92, 0x4fc5c080, 0x44c8c98e,
0x75ebf6a4, 0x7ee6ffaa, 0x63f1e4b8, 0x68fcedb6,
0xb1670a0c, 0xba6a0302, 0xa77d1810, 0xac70111e,
0x9d532e34, 0x965e273a, 0x8b493c28, 0x80443526,
0xe90f427c, 0xe2024b72, 0xff155060, 0xf418596e,
0xc53b6644, 0xce366f4a, 0xd3217458, 0xd82c7d56,
0x7a0ca137, 0x7101a839, 0x6c16b32b, 0x671bba25,
0x5638850f, 0x5d358c01, 0x40229713, 0x4b2f9e1d,
0x2264e947, 0x2969e049, 0x347efb5b, 0x3f73f255,
0x0e50cd7f, 0x055dc471, 0x184adf63, 0x1347d66d,
0xcadc31d7, 0xc1d138d9, 0xdcc623cb, 0xd7cb2ac5,
0xe6e815ef, 0xede51ce1, 0xf0f207f3, 0xfbff0efd,
0x92b479a7, 0x99b970a9, 0x84ae6bbb, 0x8fa362b5,
0xbe805d9f, 0xb58d5491, 0xa89a4f83, 0xa397468d
},
{
0x00000000, 0x0d090e0b, 0x1a121c16, 0x171b121d,
0x3424382c, 0x392d3627, 0x2e36243a, 0x233f2a31,
0x68487058, 0x65417e53, 0x725a6c4e, 0x7f536245,
0x5c6c4874, 0x5165467f, 0x467e5462, 0x4b775a69,
0xd090e0b0, 0xdd99eebb, 0xca82fca6, 0xc78bf2ad,
0xe4b4d89c, 0xe9bdd697, 0xfea6c48a, 0xf3afca81,
0xb8d890e8, 0xb5d19ee3, 0xa2ca8cfe, 0xafc382f5,
0x8cfca8c4, 0x81f5a6cf, 0x96eeb4d2, 0x9be7bad9,
0xbb3bdb7b, 0xb632d570, 0xa129c76d, 0xac20c966,
0x8f1fe357, 0x8216ed5c, 0x950dff41, 0x9804f14a,
0xd373ab23, 0xde7aa528, 0xc961b735, 0xc468b93e,
0xe757930f, 0xea5e9d04, 0xfd458f19, 0xf04c8112,
0x6bab3bcb, 0x66a235c0, 0x71b927dd, 0x7cb029d6,
0x5f8f03e7, 0x52860dec, 0x459d1ff1, 0x489411fa,
0x03e34b93, 0x0eea4598, 0x19f15785, 0x14f8598e,
0x37c773bf, 0x3ace7db4, 0x2dd56fa9, 0x20dc61a2,
0x6d76adf6, 0x607fa3fd, 0x7764b1e0, 0x7a6dbfeb,
0x595295da, 0x545b9bd1, 0x434089cc, 0x4e4987c7,
0x053eddae, 0x0837d3a5, 0x1f2cc1b8, 0x1225cfb3,
0x311ae582, 0x3c13eb89, 0x2b08f994, 0x2601f79f,
0xbde64d46, 0xb0ef434d, 0xa7f45150, 0xaafd5f5b,
0x89c2756a, 0x84cb7b61, 0x93d0697c, 0x9ed96777,
0xd5ae3d1e, 0xd8a73315, 0xcfbc2108, 0xc2b52f03,
0xe18a0532, 0xec830b39, 0xfb981924, 0xf691172f,
0xd64d768d, 0xdb447886, 0xcc5f6a9b, 0xc1566490,
0xe2694ea1, 0xef6040aa, 0xf87b52b7, 0xf5725cbc,
0xbe0506d5, 0xb30c08de, 0xa4171ac3, 0xa91e14c8,
0x8a213ef9, 0x872830f2, 0x903322ef, 0x9d3a2ce4,
0x06dd963d, 0x0bd49836, 0x1ccf8a2b, 0x11c68420,
0x32f9ae11, 0x3ff0a01a, 0x28ebb207, 0x25e2bc0c,
0x6e95e665, 0x639ce86e, 0x7487fa73, 0x798ef478,
0x5ab1de49, 0x57b8d042, 0x40a3c25f, 0x4daacc54,
0xdaec41f7, 0xd7e54ffc, 0xc0fe5de1, 0xcdf753ea,
0xeec879db, 0xe3c177d0, 0xf4da65cd, 0xf9d36bc6,
0xb2a431af, 0xbfad3fa4, 0xa8b62db9, 0xa5bf23b2,
0x86800983, 0x8b890788, 0x9c921595, 0x919b1b9e,
0x0a7ca147, 0x0775af4c, 0x106ebd51, 0x1d67b35a,
0x3e58996b, 0x33519760, 0x244a857d, 0x29438b76,
0x6234d11f, 0x6f3ddf14, 0x7826cd09, 0x752fc302,
0x5610e933, 0x5b19e738, 0x4c02f525, 0x410bfb2e,
0x61d79a8c, 0x6cde9487, 0x7bc5869a, 0x76cc8891,
0x55f3a2a0, 0x58faacab, 0x4fe1beb6, 0x42e8b0bd,
0x099fead4, 0x0496e4df, 0x138df6c2, 0x1e84f8c9,
0x3dbbd2f8, 0x30b2dcf3, 0x27a9ceee, 0x2aa0c0e5,
0xb1477a3c, 0xbc4e7437, 0xab55662a, 0xa65c6821,
0x85634210, 0x886a4c1b, 0x9f715e06, 0x9278500d,
0xd90f0a64, 0xd406046f, 0xc31d1672, 0xce141879,
0xed2b3248, 0xe0223c43, 0xf7392e5e, 0xfa302055,
0xb79aec01, 0xba93e20a, 0xad88f017, 0xa081fe1c,
0x83bed42d, 0x8eb7da26, 0x99acc83b, 0x94a5c630,
0xdfd29c59, 0xd2db9252, 0xc5c0804f, 0xc8c98e44,
0xebf6a475, 0xe6ffaa7e, 0xf1e4b863, 0xfcedb668,
0x670a0cb1, 0x6a0302ba, 0x7d1810a7, 0x70111eac,
0x532e349d, 0x5e273a96, 0x493c288b, 0x44352680,
0x0f427ce9, 0x024b72e2, 0x155060ff, 0x18596ef4,
0x3b6644c5, 0x366f4ace, 0x217458d3, 0x2c7d56d8,
0x0ca1377a, 0x01a83971, 0x16b32b6c, 0x1bba2567,
0x38850f56, 0x358c015d, 0x22971340, 0x2f9e1d4b,
0x64e94722, 0x69e04929, 0x7efb5b34, 0x73f2553f,
0x50cd7f0e, 0x5dc47105, 0x4adf6318, 0x47d66d13,
0xdc31d7ca, 0xd138d9c1, 0xc623cbdc, 0xcb2ac5d7,
0xe815efe6, 0xe51ce1ed, 0xf207f3f0, 0xff0efdfb,
0xb479a792, 0xb970a999, 0xae6bbb84, 0xa362b58f,
0x805d9fbe, 0x8d5491b5, 0x9a4f83a8, 0x97468da3
},
{
0x00000000, 0x090e0b0d, 0x121c161a, 0x1b121d17,
0x24382c34, 0x2d362739, 0x36243a2e, 0x3f2a3123,
0x48705868, 0x417e5365, 0x5a6c4e72, 0x5362457f,
0x6c48745c, 0x65467f51, 0x7e546246, 0x775a694b,
0x90e0b0d0, 0x99eebbdd, 0x82fca6ca, 0x8bf2adc7,
0xb4d89ce4, 0xbdd697e9, 0xa6c48afe, 0xafca81f3,
0xd890e8b8, 0xd19ee3b5, 0xca8cfea2, 0xc382f5af,
0xfca8c48c, 0xf5a6cf81, 0xeeb4d296, 0xe7bad99b,
0x3bdb7bbb, 0x32d570b6, 0x29c76da1, 0x20c966ac,
0x1fe3578f, 0x16ed5c82, 0x0dff4195, 0x04f14a98,
0x73ab23d3, 0x7aa528de, 0x61b735c9, 0x68b93ec4,
0x57930fe7, 0x5e9d04ea, 0x458f19fd, 0x4c8112f0,
0xab3bcb6b, 0xa235c066, 0xb927dd71, 0xb029d67c,
0x8f03e75f, 0x860dec52, 0x9d1ff145, 0x9411fa48,
0xe34b9303, 0xea45980e, 0xf1578519, 0xf8598e14,
0xc773bf37, 0xce7db43a, 0xd56fa92d, 0xdc61a220,
0x76adf66d, 0x7fa3fd60, 0x64b1e077, 0x6dbfeb7a,
0x5295da59, 0x5b9bd154, 0x4089cc43, 0x4987c74e,
0x3eddae05, 0x37d3a508, 0x2cc1b81f, 0x25cfb312,
0x1ae58231, 0x13eb893c, 0x08f9942b, 0x01f79f26,
0xe64d46bd, 0xef434db0, 0xf45150a7, 0xfd5f5baa,
0xc2756a89, 0xcb7b6184, 0xd0697c93, 0xd967779e,
0xae3d1ed5, 0xa73315d8, 0xbc2108cf, 0xb52f03c2,
0x8a0532e1, 0x830b39ec, 0x981924fb, 0x91172ff6,
0x4d768dd6, 0x447886db, 0x5f6a9bcc, 0x566490c1,
0x694ea1e2, 0x6040aaef, 0x7b52b7f8, 0x725cbcf5,
0x0506d5be, 0x0c08deb3, 0x171ac3a4, 0x1e14c8a9,
0x213ef98a, 0x2830f287, 0x3322ef90, 0x3a2ce49d,
0xdd963d06, 0xd498360b, 0xcf8a2b1c, 0xc6842011,
0xf9ae1132, 0xf0a01a3f, 0xebb20728, 0xe2bc0c25,
0x95e6656e, 0x9ce86e63, 0x87fa7374, 0x8ef47879,
0xb1de495a, 0xb8d04257, 0xa3c25f40, 0xaacc544d,
0xec41f7da, 0xe54ffcd7, 0xfe5de1c0, 0xf753eacd,
0xc879dbee, 0xc177d0e3, 0xda65cdf4, 0xd36bc6f9,
0xa431afb2, 0xad3fa4bf, 0xb62db9a8, 0xbf23b2a5,
0x80098386, 0x8907888b, 0x9215959c, 0x9b1b9e91,
0x7ca1470a, 0x75af4c07, 0x6ebd5110, 0x67b35a1d,
0x58996b3e, 0x51976033, 0x4a857d24, 0x438b7629,
0x34d11f62, 0x3ddf146f, 0x26cd0978, 0x2fc30275,
0x10e93356, 0x19e7385b, 0x02f5254c, 0x0bfb2e41,
0xd79a8c61, 0xde94876c, 0xc5869a7b, 0xcc889176,
0xf3a2a055, 0xfaacab58, 0xe1beb64f, 0xe8b0bd42,
0x9fead409, 0x96e4df04, 0x8df6c213, 0x84f8c91e,
0xbbd2f83d, 0xb2dcf330, 0xa9ceee27, 0xa0c0e52a,
0x477a3cb1, 0x4e7437bc, 0x55662aab, 0x5c6821a6,
0x63421085, 0x6a4c1b88, 0x715e069f, 0x78500d92,
0x0f0a64d9, 0x06046fd4, 0x1d1672c3, 0x141879ce,
0x2b3248ed, 0x223c43e0, 0x392e5ef7, 0x302055fa,
0x9aec01b7, 0x93e20aba, 0x88f017ad, 0x81fe1ca0,
0xbed42d83, 0xb7da268e, 0xacc83b99, 0xa5c63094,
0xd29c59df, 0xdb9252d2, 0xc0804fc5, 0xc98e44c8,
0xf6a475eb, 0xffaa7ee6, 0xe4b863f1, 0xedb668fc,
0x0a0cb167, 0x0302ba6a, 0x1810a77d, 0x111eac70,
0x2e349d53, 0x273a965e, 0x3c288b49, 0x35268044,
0x427ce90f, 0x4b72e202, 0x5060ff15, 0x596ef418,
0x6644c53b, 0x6f4ace36, 0x7458d321, 0x7d56d82c,
0xa1377a0c, 0xa8397101, 0xb32b6c16, 0xba25671b,
0x850f5638, 0x8c015d35, 0x97134022, 0x9e1d4b2f,
0xe9472264, 0xe0492969, 0xfb5b347e, 0xf2553f73,
0xcd7f0e50, 0xc471055d, 0xdf63184a, 0xd66d1347,
0x31d7cadc, 0x38d9c1d1, 0x23cbdcc6, 0x2ac5d7cb,
0x15efe6e8, 0x1ce1ede5, 0x07f3f0f2, 0x0efdfbff,
0x79a792b4, 0x70a999b9, 0x6bbb84ae, 0x62b58fa3,
0x5d9fbe80, 0x5491b58d, 0x4f83a89a, 0x468da397
},
{
0x00000000, 0x0e0b0d09, 0x1c161a12, 0x121d171b,
0x382c3424, 0x3627392d, 0x243a2e36, 0x2a31233f,
0x70586848, 0x7e536541, 0x6c4e725a, 0x62457f53,
0x48745c6c, 0x467f5165, 0x5462467e, 0x5a694b77,
0xe0b0d090, 0xeebbdd99, 0xfca6ca82, 0xf2adc78b,
0xd89ce4b4, 0xd697e9bd, 0xc48afea6, 0xca81f3af,
0x90e8b8d8, 0x9ee3b5d1, 0x8cfea2ca, 0x82f5afc3,
0xa8c48cfc, 0xa6cf81f5, 0xb4d296ee, 0xbad99be7,
0xdb7bbb3b, 0xd570b632, 0xc76da129, 0xc966ac20,
0xe3578f1f, 0xed5c8216, 0xff41950d, 0xf14a9804,
0xab23d373, 0xa528de7a, 0xb735c961, 0xb93ec468,
0x930fe757, 0x9d04ea5e, 0x8f19fd45, 0x8112f04c,
0x3bcb6bab, 0x35c066a2, 0x27dd71b9, 0x29d67cb0,
0x03e75f8f, 0x0dec5286, 0x1ff1459d, 0x11fa4894,
0x4b9303e3, 0x45980eea, 0x578519f1, 0x598e14f8,
0x73bf37c7, 0x7db43ace, 0x6fa92dd5, 0x61a220dc,
0xadf66d76, 0xa3fd607f, 0xb1e07764, 0xbfeb7a6d,
0x95da5952, 0x9bd1545b, 0x89cc4340, 0x87c74e49,
0xddae053e, 0xd3a50837, 0xc1b81f2c, 0xcfb31225,
0xe582311a, 0xeb893c13, 0xf9942b08, 0xf79f2601,
0x4d46bde6, 0x434db0ef, 0x5150a7f4, 0x5f5baafd,
0x756a89c2, 0x7b6184cb, 0x697c93d0, 0x67779ed9,
0x3d1ed5ae, 0x3315d8a7, 0x2108cfbc, 0x2f03c2b5,
0x0532e18a, 0x0b39ec83, 0x1924fb98, 0x172ff691,
0x768dd64d, 0x7886db44, 0x6a9bcc5f, 0x6490c156,
0x4ea1e269, 0x40aaef60, 0x52b7f87b, 0x5cbcf572,
0x06d5be05, 0x08deb30c, 0x1ac3a417, 0x14c8a91e,
0x3ef98a21, 0x30f28728, 0x22ef9033, 0x2ce49d3a,
0x963d06dd, 0x98360bd4, 0x8a2b1ccf, 0x842011c6,
0xae1132f9, 0xa01a3ff0, 0xb20728eb, 0xbc0c25e2,
0xe6656e95, 0xe86e639c, 0xfa737487, 0xf478798e,
0xde495ab1, 0xd04257b8, 0xc25f40a3, 0xcc544daa,
0x41f7daec, 0x4ffcd7e5, 0x5de1c0fe, 0x53eacdf7,
0x79dbeec8, 0x77d0e3c1, 0x65cdf4da, 0x6bc6f9d3,
0x31afb2a4, 0x3fa4bfad, 0x2db9a8b6, 0x23b2a5bf,
0x09838680, 0x07888b89, 0x15959c92, 0x1b9e919b,
0xa1470a7c, 0xaf4c0775, 0xbd51106e, 0xb35a1d67,
0x996b3e58, 0x97603351, 0x857d244a, 0x8b762943,
0xd11f6234, 0xdf146f3d, 0xcd097826, 0xc302752f,
0xe9335610, 0xe7385b19, 0xf5254c02, 0xfb2e410b,
0x9a8c61d7, 0x94876cde, 0x869a7bc5, 0x889176cc,
0xa2a055f3, 0xacab58fa, 0xbeb64fe1, 0xb0bd42e8,
0xead4099f, 0xe4df0496, 0xf6c2138d, 0xf8c91e84,
0xd2f83dbb, 0xdcf330b2, 0xceee27a9, 0xc0e52aa0,
0x7a3cb147, 0x7437bc4e, 0x662aab55, 0x6821a65c,
0x42108563, 0x4c1b886a, 0x5e069f71, 0x500d9278,
0x0a64d90f, 0x046fd406, 0x1672c31d, 0x1879ce14,
0x3248ed2b, 0x3c43e022, 0x2e5ef739, 0x2055fa30,
0xec01b79a, 0xe20aba93, 0xf017ad88, 0xfe1ca081,
0xd42d83be, 0xda268eb7, 0xc83b99ac, 0xc63094a5,
0x9c59dfd2, 0x9252d2db, 0x804fc5c0, 0x8e44c8c9,
0xa475ebf6, 0xaa7ee6ff, 0xb863f1e4, 0xb668fced,
0x0cb1670a, 0x02ba6a03, 0x10a77d18, 0x1eac7011,
0x349d532e, 0x3a965e27, 0x288b493c, 0x26804435,
0x7ce90f42, 0x72e2024b, 0x60ff1550, 0x6ef41859,
0x44c53b66, 0x4ace366f, 0x58d32174, 0x56d82c7d,
0x377a0ca1, 0x397101a8, 0x2b6c16b3, 0x25671bba,
0x0f563885, 0x015d358c, 0x13402297, 0x1d4b2f9e,
0x472264e9, 0x492969e0, 0x5b347efb, 0x553f73f2,
0x7f0e50cd, 0x71055dc4, 0x63184adf, 0x6d1347d6,
0xd7cadc31, 0xd9c1d138, 0xcbdcc623, 0xc5d7cb2a,
0xefe6e815, 0xe1ede51c, 0xf3f0f207, 0xfdfbff0e,
0xa792b479, 0xa999b970, 0xbb84ae6b, 0xb58fa362,
0x9fbe805d, 0x91b58d54, 0x83a89a4f, 0x8da39746
}
};
#ifdef __cplusplus
}
#endif
#endif /* _AESTAB2_H */

View File

@ -0,0 +1,334 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2009 Intel Corporation
* All Rights Reserved.
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Accelerated GHASH implementation with Intel PCLMULQDQ-NI
* instructions. This file contains an accelerated
* Galois Field Multiplication implementation.
*
* PCLMULQDQ is used to accelerate the most time-consuming part of GHASH,
* carry-less multiplication. More information about PCLMULQDQ can be
* found at:
* http://software.intel.com/en-us/articles/
* carry-less-multiplication-and-its-usage-for-computing-the-gcm-mode/
*
*/
/*
* ====================================================================
* OpenSolaris OS modifications
*
* This source originates as file galois_hash_asm.c from
* Intel Corporation dated September 21, 2009.
*
* This OpenSolaris version has these major changes from the original source:
*
* 1. Added OpenSolaris ENTRY_NP/SET_SIZE macros from
* /usr/include/sys/asm_linkage.h, lint(1B) guards, and a dummy C function
* definition for lint.
*
* 2. Formatted code, added comments, and added #includes and #defines.
*
* 3. If bit CR0.TS is set, clear and set the TS bit, after and before
* calling kpreempt_disable() and kpreempt_enable().
* If the TS bit is not set, Save and restore %xmm registers at the beginning
* and end of function calls (%xmm* registers are not saved and restored by
* during kernel thread preemption).
*
* 4. Removed code to perform hashing. This is already done with C macro
* GHASH in gcm.c. For better performance, this removed code should be
* reintegrated in the future to replace the C GHASH macro.
*
* 5. Added code to byte swap 16-byte input and output.
*
* 6. Folded in comments from the original C source with embedded assembly
* (SB_w_shift_xor.c)
*
* 7. Renamed function and reordered parameters to match OpenSolaris:
* Intel interface:
* void galois_hash_asm(unsigned char *hk, unsigned char *s,
* unsigned char *d, int length)
* OpenSolaris OS interface:
* void gcm_mul_pclmulqdq(uint64_t *x_in, uint64_t *y, uint64_t *res);
* ====================================================================
*/
#if defined(lint) || defined(__lint)

#include <sys/types.h>

/*
 * Empty C stub so lint has a prototype-checked definition to analyze;
 * the real implementation is the assembly routine below.
 */
/* ARGSUSED */
void
gcm_mul_pclmulqdq(uint64_t *x_in, uint64_t *y, uint64_t *res) {
}

#else	/* lint */
#define _ASM
#include <sys/asm_linkage.h>
#ifdef _KERNEL
/*
 * Note: the CLTS macro clobbers P2 (%rsi) under i86xpv. That is,
 * it calls HYPERVISOR_fpu_taskswitch() which modifies %rsi when it
 * uses it to pass P2 to syscall.
 * This also occurs with the STTS macro, but we don't care if
 * P2 (%rsi) is modified just before function exit.
 * The CLTS and STTS macros push and pop P1 (%rdi) already.
 */
#ifdef __xpv
#define	PROTECTED_CLTS \
	push	%rsi; \
	CLTS; \
	pop	%rsi
#else
#define	PROTECTED_CLTS \
	CLTS
#endif	/* __xpv */

/*
 * If CR0_TS is not set, align stack (with push %rbp) and push
 * %xmm0 - %xmm10 on stack, otherwise clear CR0_TS.
 *
 * The register save area is 11 * XMM_SIZE bytes, aligned to XMM_ALIGN
 * so the movaps stores do not fault.  %rbp preserves the original
 * stack pointer so SET_TS_OR_POP_XMM_REGISTERS can restore it.
 */
#define	CLEAR_TS_OR_PUSH_XMM_REGISTERS(tmpreg) \
	push	%rbp; \
	mov	%rsp, %rbp; \
	movq	%cr0, tmpreg; \
	testq	$CR0_TS, tmpreg; \
	jnz	1f; \
	and	$-XMM_ALIGN, %rsp; \
	sub	$[XMM_SIZE * 11], %rsp; \
	movaps	%xmm0, 160(%rsp); \
	movaps	%xmm1, 144(%rsp); \
	movaps	%xmm2, 128(%rsp); \
	movaps	%xmm3, 112(%rsp); \
	movaps	%xmm4, 96(%rsp); \
	movaps	%xmm5, 80(%rsp); \
	movaps	%xmm6, 64(%rsp); \
	movaps	%xmm7, 48(%rsp); \
	movaps	%xmm8, 32(%rsp); \
	movaps	%xmm9, 16(%rsp); \
	movaps	%xmm10, (%rsp); \
	jmp	2f; \
1: \
	PROTECTED_CLTS; \
2:

/*
 * If CR0_TS was not set above, pop %xmm0 - %xmm10 off stack,
 * otherwise set CR0_TS.  tmpreg must still hold the %cr0 value
 * captured by CLEAR_TS_OR_PUSH_XMM_REGISTERS.
 */
#define	SET_TS_OR_POP_XMM_REGISTERS(tmpreg) \
	testq	$CR0_TS, tmpreg; \
	jnz	1f; \
	movaps	(%rsp), %xmm10; \
	movaps	16(%rsp), %xmm9; \
	movaps	32(%rsp), %xmm8; \
	movaps	48(%rsp), %xmm7; \
	movaps	64(%rsp), %xmm6; \
	movaps	80(%rsp), %xmm5; \
	movaps	96(%rsp), %xmm4; \
	movaps	112(%rsp), %xmm3; \
	movaps	128(%rsp), %xmm2; \
	movaps	144(%rsp), %xmm1; \
	movaps	160(%rsp), %xmm0; \
	jmp	2f; \
1: \
	STTS(tmpreg); \
2: \
	mov	%rbp, %rsp; \
	pop	%rbp

#else
	/* Userland builds: the FPU state is managed by the OS; no-ops. */
#define	PROTECTED_CLTS
#define	CLEAR_TS_OR_PUSH_XMM_REGISTERS(tmpreg)
#define	SET_TS_OR_POP_XMM_REGISTERS(tmpreg)
#endif	/* _KERNEL */
/*
 * Use this mask to byte-swap a 16-byte integer with the pshufb instruction
 */

// static uint8_t byte_swap16_mask[] = {
//	15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 };
.text
.align XMM_ALIGN
.Lbyte_swap16_mask:
	.byte	15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0

/*
 * void gcm_mul_pclmulqdq(uint64_t *x_in, uint64_t *y, uint64_t *res);
 *
 * Perform a carry-less multiplication (that is, use XOR instead of the
 * multiply operator) on P1 and P2 and place the result in P3.
 *
 * Byte swap the input and the output.
 *
 * Note: x_in, y, and res all point to a block of 16-byte numbers
 * (an array of two 64-bit integers).
 *
 * Note2: For kernel code, caller is responsible for ensuring
 * kpreempt_disable() has been called. This is because %xmm registers are
 * not saved/restored. Clear and set the CR0.TS bit on entry and exit,
 * respectively, if TS is set on entry. Otherwise, if TS is not set,
 * save and restore %xmm registers on the stack.
 *
 * Note3: Original Intel definition:
 * void galois_hash_asm(unsigned char *hk, unsigned char *s,
 *	unsigned char *d, int length)
 *
 * Note4: Register/parameter mapping:
 * Intel:
 *	Parameter 1: %rcx (copied to %xmm0)	hk or x_in
 *	Parameter 2: %rdx (copied to %xmm1)	s or y
 *	Parameter 3: %rdi (result)		d or res
 * OpenSolaris:
 *	Parameter 1: %rdi (copied to %xmm0)	x_in
 *	Parameter 2: %rsi (copied to %xmm1)	y
 *	Parameter 3: %rdx (result)		res
 */

ENTRY_NP(gcm_mul_pclmulqdq)
	CLEAR_TS_OR_PUSH_XMM_REGISTERS(%r10)

	//
	// Copy Parameters
	//
	movdqu	(%rdi), %xmm0	// P1
	movdqu	(%rsi), %xmm1	// P2

	//
	// Byte swap 16-byte input
	//
	lea	.Lbyte_swap16_mask(%rip), %rax
	movaps	(%rax), %xmm10
	pshufb	%xmm10, %xmm0
	pshufb	%xmm10, %xmm1

	//
	// Multiply with the hash key.  The four pclmulqdq immediates
	// select which 64-bit halves of the operands are multiplied.
	//
	movdqu	%xmm0, %xmm3
	pclmulqdq $0, %xmm1, %xmm3	// xmm3 holds a0*b0
	movdqu	%xmm0, %xmm4
	pclmulqdq $16, %xmm1, %xmm4	// xmm4 holds a0*b1
	movdqu	%xmm0, %xmm5
	pclmulqdq $1, %xmm1, %xmm5	// xmm5 holds a1*b0
	movdqu	%xmm0, %xmm6
	pclmulqdq $17, %xmm1, %xmm6	// xmm6 holds a1*b1

	pxor	%xmm5, %xmm4	// xmm4 holds a0*b1 + a1*b0
	movdqu	%xmm4, %xmm5	// move the contents of xmm4 to xmm5
	psrldq	$8, %xmm4	// shift xmm4 right by 64 bits (8 bytes)
	pslldq	$8, %xmm5	// shift xmm5 left by 64 bits (8 bytes)
	pxor	%xmm5, %xmm3
	pxor	%xmm4, %xmm6	// Register pair <xmm6:xmm3> holds the result
				// of the carry-less multiplication of
				// xmm0 by xmm1.

	// We shift the result of the multiplication by one bit position
	// to the left to cope for the fact that the bits are reversed.
	movdqu	%xmm3, %xmm7
	movdqu	%xmm6, %xmm8
	pslld	$1, %xmm3
	pslld	$1, %xmm6
	psrld	$31, %xmm7	// capture the carry bits of each dword
	psrld	$31, %xmm8
	movdqu	%xmm7, %xmm9
	pslldq	$4, %xmm8	// propagate carries across dword lanes
	pslldq	$4, %xmm7
	psrldq	$12, %xmm9	// carry out of xmm3's top dword into xmm6
	por	%xmm7, %xmm3
	por	%xmm8, %xmm6
	por	%xmm9, %xmm6

	//
	// First phase of the reduction
	//
	// Move xmm3 into xmm7, xmm8, xmm9 in order to perform the shifts
	// independently.
	movdqu	%xmm3, %xmm7
	movdqu	%xmm3, %xmm8
	movdqu	%xmm3, %xmm9
	pslld	$31, %xmm7	// packed left shift by 31 bits
	pslld	$30, %xmm8	// packed left shift by 30 bits
	pslld	$25, %xmm9	// packed left shift by 25 bits
	pxor	%xmm8, %xmm7	// xor the shifted versions
	pxor	%xmm9, %xmm7
	movdqu	%xmm7, %xmm8
	pslldq	$12, %xmm7
	psrldq	$4, %xmm8	// kept for the second reduction phase
	pxor	%xmm7, %xmm3	// first phase of the reduction complete

	//
	// Second phase of the reduction
	//
	// Make 3 copies of xmm3 in xmm2, xmm4, xmm5 for doing these
	// shift operations.
	movdqu	%xmm3, %xmm2
	movdqu	%xmm3, %xmm4
	movdqu	%xmm3, %xmm5
	psrld	$1, %xmm2	// packed right shift by 1 bit
	psrld	$2, %xmm4	// packed right shift by 2 bits
	psrld	$7, %xmm5	// packed right shift by 7 bits
	pxor	%xmm4, %xmm2	// xor the shifted versions
	pxor	%xmm5, %xmm2
	pxor	%xmm8, %xmm2
	pxor	%xmm2, %xmm3
	pxor	%xmm3, %xmm6	// the result is in xmm6

	//
	// Byte swap 16-byte result
	//
	pshufb	%xmm10, %xmm6	// %xmm10 has the swap mask

	//
	// Store the result
	//
	movdqu	%xmm6, (%rdx)	// P3

	//
	// Cleanup and Return
	//
	SET_TS_OR_POP_XMM_REGISTERS(%r10)
	ret
	SET_SIZE(gcm_mul_pclmulqdq)
#endif /* lint || __lint */

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,775 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <sys/zfs_context.h>
#include <sys/crypto/common.h>
#include <sys/crypto/api.h>
#include <sys/crypto/impl.h>
#include <sys/modhash.h>
/* Cryptographic mechanisms tables and their access functions */
/*
* Internal numbers assigned to mechanisms are coded as follows:
*
* +----------------+----------------+
* | mech. class | mech. index |
* <--- 32-bits --->+<--- 32-bits --->
*
* the mech_class identifies the table the mechanism belongs to.
* mech_index is the index for that mechanism in the table.
* A mechanism belongs to exactly 1 table.
* The tables are:
* . digest_mechs_tab[] for the msg digest mechs.
* . cipher_mechs_tab[] for encrypt/decrypt and wrap/unwrap mechs.
* . mac_mechs_tab[] for MAC mechs.
* . sign_mechs_tab[] for sign & verify mechs.
* . keyops_mechs_tab[] for key/key pair generation, and key derivation.
* . misc_mechs_tab[] for mechs that don't belong to any of the above.
*
* There are no holes in the tables.
*/
/*
* Locking conventions:
* --------------------
* A global mutex, kcf_mech_tabs_lock, serializes writes to the
* mechanism table via kcf_create_mech_entry().
*
* A mutex is associated with every entry of the tables.
* The mutex is acquired whenever the entry is accessed for
* 1) retrieving the mech_id (comparing the mech name)
* 2) finding a provider for an xxx_init() or atomic operation.
* 3) altering the mechs entry to add or remove a provider.
*
* In 2), after a provider is chosen, its prov_desc is held and the
* entry's mutex must be dropped. The provider's working function (SPI) is
* called outside the mech_entry's mutex.
*
* The number of providers for a particular mechanism is not expected to be
* long enough to justify the cost of using rwlocks, so the per-mechanism
* entry mutex won't be very *hot*.
*
* When both kcf_mech_tabs_lock and a mech_entry mutex need to be held,
* kcf_mech_tabs_lock must always be acquired first.
*
*/
/* Mechanisms tables */

/* RFE 4687834 Will deal with the extensibility of these tables later */

/* Fixed-size per-class mechanism tables (see the layout comment above). */
kcf_mech_entry_t kcf_digest_mechs_tab[KCF_MAXDIGEST];
kcf_mech_entry_t kcf_cipher_mechs_tab[KCF_MAXCIPHER];
kcf_mech_entry_t kcf_mac_mechs_tab[KCF_MAXMAC];
kcf_mech_entry_t kcf_sign_mechs_tab[KCF_MAXSIGN];
kcf_mech_entry_t kcf_keyops_mechs_tab[KCF_MAXKEYOPS];
kcf_mech_entry_t kcf_misc_mechs_tab[KCF_MAXMISC];

/* Indexed by kcf_ops_class_t; maps each class to its table and its size. */
kcf_mech_entry_tab_t kcf_mech_tabs_tab[KCF_LAST_OPSCLASS + 1] = {
	{0, NULL},			/* No class zero */
	{KCF_MAXDIGEST, kcf_digest_mechs_tab},
	{KCF_MAXCIPHER, kcf_cipher_mechs_tab},
	{KCF_MAXMAC, kcf_mac_mechs_tab},
	{KCF_MAXSIGN, kcf_sign_mechs_tab},
	{KCF_MAXKEYOPS, kcf_keyops_mechs_tab},
	{KCF_MAXMISC, kcf_misc_mechs_tab}
};

/*
 * Per-algorithm internal thresholds for the minimum input size before
 * offloading to a hardware provider.
 * Dispatching a crypto operation to a hardware provider entails paying the
 * cost of an additional context switch. Measurements with Sun Accelerator 4000
 * show that 512-byte jobs or smaller are better handled in software.
 * There is room for refinement here.
 *
 */
int kcf_md5_threshold = 512;
int kcf_sha1_threshold = 512;
int kcf_des_threshold = 512;
int kcf_des3_threshold = 512;
int kcf_aes_threshold = 512;
int kcf_bf_threshold = 512;
int kcf_rc4_threshold = 512;

/* Serializes writes to the mechanism tables (see locking conventions above). */
kmutex_t kcf_mech_tabs_lock;

/* Generation number handed to each registering software provider. */
static uint32_t kcf_gen_swprov = 0;

int kcf_mech_hash_size = 256;
mod_hash_t *kcf_mech_hash;	/* mech name to id hash */
/*
 * Look up the internal mechanism number assigned to the named mechanism.
 * Returns CRYPTO_MECH_INVALID when the name is not in the hash.
 */
static crypto_mech_type_t
kcf_mech_hash_find(char *mechname)
{
	mod_hash_val_t hv;

	if (mod_hash_find(kcf_mech_hash, (mod_hash_key_t)mechname, &hv) != 0)
		return (CRYPTO_MECH_INVALID);

	ASSERT(*(crypto_mech_type_t *)hv != CRYPTO_MECH_INVALID);
	return (*(crypto_mech_type_t *)hv);
}
void
kcf_destroy_mech_tabs(void)
{
if (kcf_mech_hash) mod_hash_destroy_hash(kcf_mech_hash);
}
/*
* kcf_init_mech_tabs()
*
* Called by the misc/kcf's _init() routine to initialize the tables
* of mech_entry's.
*/
void
kcf_init_mech_tabs(void)
{
int i, max;
kcf_ops_class_t class;
kcf_mech_entry_t *me_tab;
/* Initializes the mutex locks. */
mutex_init(&kcf_mech_tabs_lock, NULL, MUTEX_DEFAULT, NULL);
/* Then the pre-defined mechanism entries */
/* Two digests */
(void) strncpy(kcf_digest_mechs_tab[0].me_name, SUN_CKM_MD5,
CRYPTO_MAX_MECH_NAME);
kcf_digest_mechs_tab[0].me_threshold = kcf_md5_threshold;
(void) strncpy(kcf_digest_mechs_tab[1].me_name, SUN_CKM_SHA1,
CRYPTO_MAX_MECH_NAME);
kcf_digest_mechs_tab[1].me_threshold = kcf_sha1_threshold;
/* The symmetric ciphers in various modes */
(void) strncpy(kcf_cipher_mechs_tab[0].me_name, SUN_CKM_DES_CBC,
CRYPTO_MAX_MECH_NAME);
kcf_cipher_mechs_tab[0].me_threshold = kcf_des_threshold;
(void) strncpy(kcf_cipher_mechs_tab[1].me_name, SUN_CKM_DES3_CBC,
CRYPTO_MAX_MECH_NAME);
kcf_cipher_mechs_tab[1].me_threshold = kcf_des3_threshold;
(void) strncpy(kcf_cipher_mechs_tab[2].me_name, SUN_CKM_DES_ECB,
CRYPTO_MAX_MECH_NAME);
kcf_cipher_mechs_tab[2].me_threshold = kcf_des_threshold;
(void) strncpy(kcf_cipher_mechs_tab[3].me_name, SUN_CKM_DES3_ECB,
CRYPTO_MAX_MECH_NAME);
kcf_cipher_mechs_tab[3].me_threshold = kcf_des3_threshold;
(void) strncpy(kcf_cipher_mechs_tab[4].me_name, SUN_CKM_BLOWFISH_CBC,
CRYPTO_MAX_MECH_NAME);
kcf_cipher_mechs_tab[4].me_threshold = kcf_bf_threshold;
(void) strncpy(kcf_cipher_mechs_tab[5].me_name, SUN_CKM_BLOWFISH_ECB,
CRYPTO_MAX_MECH_NAME);
kcf_cipher_mechs_tab[5].me_threshold = kcf_bf_threshold;
(void) strncpy(kcf_cipher_mechs_tab[6].me_name, SUN_CKM_AES_CBC,
CRYPTO_MAX_MECH_NAME);
kcf_cipher_mechs_tab[6].me_threshold = kcf_aes_threshold;
(void) strncpy(kcf_cipher_mechs_tab[7].me_name, SUN_CKM_AES_ECB,
CRYPTO_MAX_MECH_NAME);
kcf_cipher_mechs_tab[7].me_threshold = kcf_aes_threshold;
(void) strncpy(kcf_cipher_mechs_tab[8].me_name, SUN_CKM_RC4,
CRYPTO_MAX_MECH_NAME);
kcf_cipher_mechs_tab[8].me_threshold = kcf_rc4_threshold;
/* 4 HMACs */
(void) strncpy(kcf_mac_mechs_tab[0].me_name, SUN_CKM_MD5_HMAC,
CRYPTO_MAX_MECH_NAME);
kcf_mac_mechs_tab[0].me_threshold = kcf_md5_threshold;
(void) strncpy(kcf_mac_mechs_tab[1].me_name, SUN_CKM_MD5_HMAC_GENERAL,
CRYPTO_MAX_MECH_NAME);
kcf_mac_mechs_tab[1].me_threshold = kcf_md5_threshold;
(void) strncpy(kcf_mac_mechs_tab[2].me_name, SUN_CKM_SHA1_HMAC,
CRYPTO_MAX_MECH_NAME);
kcf_mac_mechs_tab[2].me_threshold = kcf_sha1_threshold;
(void) strncpy(kcf_mac_mechs_tab[3].me_name, SUN_CKM_SHA1_HMAC_GENERAL,
CRYPTO_MAX_MECH_NAME);
kcf_mac_mechs_tab[3].me_threshold = kcf_sha1_threshold;
/* 1 random number generation pseudo mechanism */
(void) strncpy(kcf_misc_mechs_tab[0].me_name, SUN_RANDOM,
CRYPTO_MAX_MECH_NAME);
kcf_mech_hash = mod_hash_create_strhash_nodtr("kcf mech2id hash",
kcf_mech_hash_size, mod_hash_null_valdtor);
for (class = KCF_FIRST_OPSCLASS; class <= KCF_LAST_OPSCLASS; class++) {
max = kcf_mech_tabs_tab[class].met_size;
me_tab = kcf_mech_tabs_tab[class].met_tab;
for (i = 0; i < max; i++) {
mutex_init(&(me_tab[i].me_mutex), NULL,
MUTEX_DEFAULT, NULL);
if (me_tab[i].me_name[0] != 0) {
me_tab[i].me_mechid = KCF_MECHID(class, i);
(void) mod_hash_insert(kcf_mech_hash,
(mod_hash_key_t)me_tab[i].me_name,
(mod_hash_val_t)&(me_tab[i].me_mechid));
}
}
}
}
/*
 * kcf_create_mech_entry()
 *
 * Arguments:
 * . The class of mechanism.
 * . the name of the new mechanism.
 *
 * Description:
 * Creates a new mech_entry for a mechanism not yet known to the
 * framework.
 * This routine is called by kcf_add_mech_provider, which is
 * in turn invoked for each mechanism supported by a provider.
 * The 'class' argument depends on the crypto_func_group_t bitmask
 * in the registering provider's mech_info struct for this mechanism.
 * When there is ambiguity in the mapping between the crypto_func_group_t
 * and a class (dual ops, ...) the KCF_MISC_CLASS should be used.
 *
 * Context:
 * User context only.
 *
 * Returns:
 * KCF_INVALID_MECH_CLASS or KCF_INVALID_MECH_NAME if the class or
 * the mechname is bogus.
 * KCF_MECH_TAB_FULL when there is no room left in the mech. tabs.
 * KCF_SUCCESS otherwise.
 */
static int
kcf_create_mech_entry(kcf_ops_class_t class, char *mechname)
{
	kcf_mech_entry_t *tab;
	int slot, tab_size;

	if (class < KCF_FIRST_OPSCLASS || class > KCF_LAST_OPSCLASS)
		return (KCF_INVALID_MECH_CLASS);

	if (mechname == NULL || mechname[0] == '\0')
		return (KCF_INVALID_MECH_NAME);

	/*
	 * First check if the mechanism is already in one of the tables.
	 * The mech_entry could be in another class.
	 */
	mutex_enter(&kcf_mech_tabs_lock);
	if (kcf_mech_hash_find(mechname) != CRYPTO_MECH_INVALID) {
		/* Nothing to do, regardless the suggested class. */
		mutex_exit(&kcf_mech_tabs_lock);
		return (KCF_SUCCESS);
	}

	/* Now take the next unused mech entry in the class's tab */
	tab = kcf_mech_tabs_tab[class].met_tab;
	tab_size = kcf_mech_tabs_tab[class].met_size;

	for (slot = 0; slot < tab_size; slot++) {
		mutex_enter(&(tab[slot].me_mutex));
		if (tab[slot].me_name[0] != '\0') {
			/* slot occupied, keep scanning */
			mutex_exit(&(tab[slot].me_mutex));
			continue;
		}

		/* Found an empty spot; claim it. */
		(void) strncpy(tab[slot].me_name, mechname,
		    CRYPTO_MAX_MECH_NAME);
		tab[slot].me_name[CRYPTO_MAX_MECH_NAME - 1] = '\0';
		tab[slot].me_mechid = KCF_MECHID(class, slot);

		/*
		 * No a-priori information about the new mechanism, so
		 * the threshold is set to zero.
		 */
		tab[slot].me_threshold = 0;

		mutex_exit(&(tab[slot].me_mutex));

		/* Add the new mechanism to the hash table */
		(void) mod_hash_insert(kcf_mech_hash,
		    (mod_hash_key_t)tab[slot].me_name,
		    (mod_hash_val_t)&(tab[slot].me_mechid));
		break;
	}

	mutex_exit(&kcf_mech_tabs_lock);

	return (slot == tab_size ? KCF_MECH_TAB_FULL : KCF_SUCCESS);
}
/*
 * kcf_add_mech_provider()
 *
 * Arguments:
 * . An index in to the provider mechanism array
 * . A pointer to the provider descriptor
 * . A storage for the kcf_prov_mech_desc_t the entry was added at.
 *
 * Description:
 * Adds a new provider of a mechanism to the mechanism's mech_entry
 * chain.  If the mechanism is not yet known to the framework, a new
 * mech_entry is created first.  For mechanisms advertising dual
 * (combined) operations, the cross-reference lists between this
 * mechanism and the provider's other dual mechanisms are updated too.
 *
 * Context:
 * User context only.
 *
 * Returns
 * KCF_SUCCESS on success
 * KCF_MECH_TAB_FULL otherwise.
 */
int
kcf_add_mech_provider(short mech_indx,
    kcf_provider_desc_t *prov_desc, kcf_prov_mech_desc_t **pmdpp)
{
	int error;
	kcf_mech_entry_t *mech_entry = NULL;
	crypto_mech_info_t *mech_info;
	crypto_mech_type_t kcf_mech_type, mt;
	kcf_prov_mech_desc_t *prov_mech, *prov_mech2;
	crypto_func_group_t simple_fg_mask, dual_fg_mask;
	crypto_mech_info_t *dmi;
	crypto_mech_info_list_t *mil, *mil2;
	kcf_mech_entry_t *me;
	int i;

	ASSERT(prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);

	mech_info = &prov_desc->pd_mechanisms[mech_indx];

	/*
	 * A mechanism belongs to exactly one mechanism table.
	 * Find the class corresponding to the function group flag of
	 * the mechanism.
	 */
	kcf_mech_type = kcf_mech_hash_find(mech_info->cm_mech_name);
	if (kcf_mech_type == CRYPTO_MECH_INVALID) {
		crypto_func_group_t fg = mech_info->cm_func_group_mask;
		kcf_ops_class_t class;

		/* Map the advertised function-group bits to a class. */
		if (fg & CRYPTO_FG_DIGEST || fg & CRYPTO_FG_DIGEST_ATOMIC)
			class = KCF_DIGEST_CLASS;
		else if (fg & CRYPTO_FG_ENCRYPT || fg & CRYPTO_FG_DECRYPT ||
		    fg & CRYPTO_FG_ENCRYPT_ATOMIC ||
		    fg & CRYPTO_FG_DECRYPT_ATOMIC)
			class = KCF_CIPHER_CLASS;
		else if (fg & CRYPTO_FG_MAC || fg & CRYPTO_FG_MAC_ATOMIC)
			class = KCF_MAC_CLASS;
		else if (fg & CRYPTO_FG_SIGN || fg & CRYPTO_FG_VERIFY ||
		    fg & CRYPTO_FG_SIGN_ATOMIC ||
		    fg & CRYPTO_FG_VERIFY_ATOMIC ||
		    fg & CRYPTO_FG_SIGN_RECOVER ||
		    fg & CRYPTO_FG_VERIFY_RECOVER)
			class = KCF_SIGN_CLASS;
		else if (fg & CRYPTO_FG_GENERATE ||
		    fg & CRYPTO_FG_GENERATE_KEY_PAIR ||
		    fg & CRYPTO_FG_WRAP || fg & CRYPTO_FG_UNWRAP ||
		    fg & CRYPTO_FG_DERIVE)
			class = KCF_KEYOPS_CLASS;
		else
			class = KCF_MISC_CLASS;

		/*
		 * Attempt to create a new mech_entry for the specified
		 * mechanism. kcf_create_mech_entry() can handle the case
		 * where such an entry already exists.
		 */
		if ((error = kcf_create_mech_entry(class,
		    mech_info->cm_mech_name)) != KCF_SUCCESS) {
			return (error);
		}
		/* get the KCF mech type that was assigned to the mechanism */
		kcf_mech_type = kcf_mech_hash_find(mech_info->cm_mech_name);
		ASSERT(kcf_mech_type != CRYPTO_MECH_INVALID);
	}

	error = kcf_get_mech_entry(kcf_mech_type, &mech_entry);
	ASSERT(error == KCF_SUCCESS);

	/* allocate and initialize new kcf_prov_mech_desc */
	prov_mech = kmem_zalloc(sizeof (kcf_prov_mech_desc_t), KM_SLEEP);
	bcopy(mech_info, &prov_mech->pm_mech_info, sizeof (crypto_mech_info_t));
	prov_mech->pm_prov_desc = prov_desc;
	prov_desc->pd_mech_indx[KCF_MECH2CLASS(kcf_mech_type)]
	    [KCF_MECH2INDEX(kcf_mech_type)] = mech_indx;

	/* Hold the provider while this descriptor references it. */
	KCF_PROV_REFHOLD(prov_desc);
	KCF_PROV_IREFHOLD(prov_desc);

	dual_fg_mask = mech_info->cm_func_group_mask & CRYPTO_FG_DUAL_MASK;

	/* Non-dual mechanism: skip straight to the chain insertion. */
	if (dual_fg_mask == ((crypto_func_group_t)0))
		goto add_entry;

	simple_fg_mask = (mech_info->cm_func_group_mask &
	    CRYPTO_FG_SIMPLEOP_MASK) | CRYPTO_FG_RANDOM;

	/*
	 * Walk the provider's other mechanisms looking for dual-operation
	 * partners and cross-link them with this one.
	 */
	for (i = 0; i < prov_desc->pd_mech_list_count; i++) {
		dmi = &prov_desc->pd_mechanisms[i];

		/* skip self */
		if (dmi->cm_mech_number == mech_info->cm_mech_number)
			continue;

		/* skip if not a dual operation mechanism */
		if (!(dmi->cm_func_group_mask & dual_fg_mask) ||
		    (dmi->cm_func_group_mask & simple_fg_mask))
			continue;

		mt = kcf_mech_hash_find(dmi->cm_mech_name);
		if (mt == CRYPTO_MECH_INVALID)
			continue;

		if (kcf_get_mech_entry(mt, &me) != KCF_SUCCESS)
			continue;

		mil = kmem_zalloc(sizeof (*mil), KM_SLEEP);
		mil2 = kmem_zalloc(sizeof (*mil2), KM_SLEEP);

		/*
		 * Ignore hard-coded entries in the mech table
		 * if the provider hasn't registered.
		 */
		mutex_enter(&me->me_mutex);
		if (me->me_hw_prov_chain == NULL && me->me_sw_prov == NULL) {
			mutex_exit(&me->me_mutex);
			kmem_free(mil, sizeof (*mil));
			kmem_free(mil2, sizeof (*mil2));
			continue;
		}

		/*
		 * Add other dual mechanisms that have registered
		 * with the framework to this mechanism's
		 * cross-reference list.
		 */
		mil->ml_mech_info = *dmi; /* struct assignment */
		mil->ml_kcf_mechid = mt;

		/* add to head of list */
		mil->ml_next = prov_mech->pm_mi_list;
		prov_mech->pm_mi_list = mil;

		if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
			prov_mech2 = me->me_hw_prov_chain;
		else
			prov_mech2 = me->me_sw_prov;

		if (prov_mech2 == NULL) {
			kmem_free(mil2, sizeof (*mil2));
			mutex_exit(&me->me_mutex);
			continue;
		}

		/*
		 * Update all other cross-reference lists by
		 * adding this new mechanism.
		 */
		while (prov_mech2 != NULL) {
			if (prov_mech2->pm_prov_desc == prov_desc) {
				/* struct assignment */
				mil2->ml_mech_info = *mech_info;
				mil2->ml_kcf_mechid = kcf_mech_type;

				/* add to head of list */
				mil2->ml_next = prov_mech2->pm_mi_list;
				prov_mech2->pm_mi_list = mil2;
				break;
			}
			prov_mech2 = prov_mech2->pm_next;
		}
		/* No matching descriptor in the partner's chain: discard. */
		if (prov_mech2 == NULL)
			kmem_free(mil2, sizeof (*mil2));

		mutex_exit(&me->me_mutex);
	}

add_entry:
	/*
	 * Add new kcf_prov_mech_desc at the front of HW providers
	 * chain.
	 */
	switch (prov_desc->pd_prov_type) {

	case CRYPTO_HW_PROVIDER:
		mutex_enter(&mech_entry->me_mutex);
		prov_mech->pm_me = mech_entry;
		prov_mech->pm_next = mech_entry->me_hw_prov_chain;
		mech_entry->me_hw_prov_chain = prov_mech;
		mech_entry->me_num_hwprov++;
		mutex_exit(&mech_entry->me_mutex);
		break;

	case CRYPTO_SW_PROVIDER:
		mutex_enter(&mech_entry->me_mutex);
		if (mech_entry->me_sw_prov != NULL) {
			/*
			 * There is already a SW provider for this mechanism.
			 * Since we allow only one SW provider per mechanism,
			 * report this condition.
			 *
			 * NOTE(review): both KCF_PROV_REFHOLD and
			 * KCF_PROV_IREFHOLD were taken above, but this
			 * failure path releases only the regular hold —
			 * verify an IREFRELE is not needed here.
			 */
			cmn_err(CE_WARN, "The cryptographic software provider "
			    "\"%s\" will not be used for %s. The provider "
			    "\"%s\" will be used for this mechanism "
			    "instead.", prov_desc->pd_description,
			    mech_info->cm_mech_name,
			    mech_entry->me_sw_prov->pm_prov_desc->
			    pd_description);
			KCF_PROV_REFRELE(prov_desc);
			kmem_free(prov_mech, sizeof (kcf_prov_mech_desc_t));
			prov_mech = NULL;
		} else {
			/*
			 * Set the provider as the software provider for
			 * this mechanism.
			 */
			mech_entry->me_sw_prov = prov_mech;

			/* We'll wrap around after 4 billion registrations! */
			mech_entry->me_gen_swprov = kcf_gen_swprov++;
		}
		mutex_exit(&mech_entry->me_mutex);
		break;
	default:
		break;
	}

	*pmdpp = prov_mech;

	return (KCF_SUCCESS);
}
/*
 * kcf_remove_mech_provider()
 *
 * Arguments:
 * . mech_name: the name of the mechanism.
 * . prov_desc: The provider descriptor
 *
 * Description:
 * Removes a provider from chain of provider descriptors.
 * The provider is made unavailable to kernel consumers for the specified
 * mechanism.  Any dual-operation cross-reference lists the provider
 * participated in are unlinked and freed, and the reference holds taken
 * in kcf_add_mech_provider() are released.
 *
 * Context:
 * User context only.
 */
void
kcf_remove_mech_provider(char *mech_name, kcf_provider_desc_t *prov_desc)
{
	crypto_mech_type_t mech_type;
	kcf_prov_mech_desc_t *prov_mech = NULL, *prov_chain;
	kcf_prov_mech_desc_t **prev_entry_next;
	kcf_mech_entry_t *mech_entry;
	crypto_mech_info_list_t *mil, *mil2, *next, **prev_next;

	ASSERT(prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);

	/* get the KCF mech type that was assigned to the mechanism */
	if ((mech_type = kcf_mech_hash_find(mech_name)) ==
	    CRYPTO_MECH_INVALID) {
		/*
		 * Provider was not allowed for this mech due to policy or
		 * configuration.
		 */
		return;
	}

	/* get a ptr to the mech_entry that was created */
	if (kcf_get_mech_entry(mech_type, &mech_entry) != KCF_SUCCESS) {
		/*
		 * Provider was not allowed for this mech due to policy or
		 * configuration.
		 */
		return;
	}

	mutex_enter(&mech_entry->me_mutex);

	switch (prov_desc->pd_prov_type) {

	case CRYPTO_HW_PROVIDER:
		/* find the provider in the mech_entry chain */
		prev_entry_next = &mech_entry->me_hw_prov_chain;
		prov_mech = mech_entry->me_hw_prov_chain;
		while (prov_mech != NULL &&
		    prov_mech->pm_prov_desc != prov_desc) {
			prev_entry_next = &prov_mech->pm_next;
			prov_mech = prov_mech->pm_next;
		}

		if (prov_mech == NULL) {
			/* entry not found, simply return */
			mutex_exit(&mech_entry->me_mutex);
			return;
		}

		/* remove provider entry from mech_entry chain */
		*prev_entry_next = prov_mech->pm_next;
		ASSERT(mech_entry->me_num_hwprov > 0);
		mech_entry->me_num_hwprov--;
		break;

	case CRYPTO_SW_PROVIDER:
		if (mech_entry->me_sw_prov == NULL ||
		    mech_entry->me_sw_prov->pm_prov_desc != prov_desc) {
			/* not the software provider for this mechanism */
			mutex_exit(&mech_entry->me_mutex);
			return;
		}
		prov_mech = mech_entry->me_sw_prov;
		mech_entry->me_sw_prov = NULL;
		break;
	default:
		/*
		 * NOTE(review): prov_mech stays NULL here and is
		 * dereferenced below; unreachable in practice given the
		 * ASSERT on pd_prov_type above — confirm no other
		 * provider types can reach this function.
		 */
		break;
	}

	mutex_exit(&mech_entry->me_mutex);

	/* Free the dual ops cross-reference lists */
	mil = prov_mech->pm_mi_list;
	while (mil != NULL) {
		next = mil->ml_next;
		if (kcf_get_mech_entry(mil->ml_kcf_mechid,
		    &mech_entry) != KCF_SUCCESS) {
			mil = next;
			continue;
		}

		mutex_enter(&mech_entry->me_mutex);
		if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
			prov_chain = mech_entry->me_hw_prov_chain;
		else
			prov_chain = mech_entry->me_sw_prov;

		/*
		 * Find this provider's descriptor on the partner
		 * mechanism's chain and unlink our back-reference
		 * from its cross-reference list.
		 */
		while (prov_chain != NULL) {
			if (prov_chain->pm_prov_desc == prov_desc) {
				prev_next = &prov_chain->pm_mi_list;
				mil2 = prov_chain->pm_mi_list;
				while (mil2 != NULL &&
				    mil2->ml_kcf_mechid != mech_type) {
					prev_next = &mil2->ml_next;
					mil2 = mil2->ml_next;
				}
				if (mil2 != NULL) {
					*prev_next = mil2->ml_next;
					kmem_free(mil2, sizeof (*mil2));
				}
				break;
			}
			prov_chain = prov_chain->pm_next;
		}

		mutex_exit(&mech_entry->me_mutex);
		kmem_free(mil, sizeof (crypto_mech_info_list_t));
		mil = next;
	}

	/* free entry */
	KCF_PROV_REFRELE(prov_mech->pm_prov_desc);
	KCF_PROV_IREFRELE(prov_mech->pm_prov_desc);
	kmem_free(prov_mech, sizeof (kcf_prov_mech_desc_t));
}
/*
 * kcf_get_mech_entry()
 *
 * Arguments:
 * . The framework mechanism type
 * . Storage for the mechanism entry
 *
 * Description:
 * Decodes the class and index packed into the mechanism number and
 * returns a pointer to the corresponding mech_entry.
 *
 * Context:
 * User and interrupt contexts.
 *
 * Returns:
 * KCF_INVALID_MECH_NUMBER if the class or index is out of range.
 * KCF_SUCCESS otherwise.
 */
int
kcf_get_mech_entry(crypto_mech_type_t mech_type, kcf_mech_entry_t **mep)
{
	kcf_ops_class_t cls;
	kcf_mech_entry_tab_t *tab;
	int idx;

	ASSERT(mep != NULL);

	cls = KCF_MECH2CLASS(mech_type);
	if (cls < KCF_FIRST_OPSCLASS || cls > KCF_LAST_OPSCLASS) {
		/* the caller won't need to know it's an invalid class */
		return (KCF_INVALID_MECH_NUMBER);
	}

	tab = &kcf_mech_tabs_tab[cls];
	idx = KCF_MECH2INDEX(mech_type);
	if (idx < 0 || idx >= tab->met_size)
		return (KCF_INVALID_MECH_NUMBER);

	*mep = &tab->met_tab[idx];

	return (KCF_SUCCESS);
}
/* CURRENTLY UNSUPPORTED: attempting to load the module if it isn't found */
/*
 * Look up the mechanism number assigned to 'mechname'.
 *
 * The load_module argument originally requested that an unloaded
 * software provider be loaded on a miss (with MOD_NOAUTOUNLOAD set to
 * keep it resident); module (auto)loading is not supported in this
 * port, so the flag is accepted but ignored and the lookup is a plain
 * hash probe.  Returns CRYPTO_MECH_INVALID when the name is unknown.
 */
/* ARGSUSED */
crypto_mech_type_t
crypto_mech2id_common(char *mechname, boolean_t load_module)
{
	return (kcf_mech_hash_find(mechname));
}

View File

@ -0,0 +1,229 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <sys/zfs_context.h>
#include <modes/modes.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
/*
 * Utility routine to apply the command, 'cmd', to the data in the
 * uio structure: copy 'len' bytes between 'buf' and the uio (or
 * compare them), starting at uio offset data->cd_offset.
 *
 * 'digest_ctx' and 'update' are accepted for interface compatibility
 * with the original illumos code but are unused; the digest commands
 * are handled by callers in this port.
 *
 * Returns:
 *   CRYPTO_SUCCESS           - all 'len' bytes processed.
 *   CRYPTO_ARGUMENTS_BAD     - non-SYSSPACE uio, a digest command, or
 *                              an unrecognized command.
 *   CRYPTO_DATA_LEN_RANGE    - offset/length beyond the uio data.
 *   CRYPTO_BUFFER_TOO_SMALL  - COPY_TO_DATA with insufficient room
 *                              (data->cd_length is set to 'len').
 *   CRYPTO_SIGNATURE_INVALID - COMPARE_TO_DATA mismatch.
 */
int
crypto_uio_data(crypto_data_t *data, uchar_t *buf, int len, cmd_type_t cmd,
    void *digest_ctx, void (*update)(void))
{
	uio_t *uiop = data->cd_uio;
	off_t offset = data->cd_offset;
	size_t length = len;
	uint_t vec_idx;
	size_t cur_len;
	uchar_t *datap;

	ASSERT(data->cd_format == CRYPTO_DATA_UIO);
	if (uiop->uio_segflg != UIO_SYSSPACE) {
		return (CRYPTO_ARGUMENTS_BAD);
	}

	/*
	 * Jump to the first iovec containing data to be
	 * processed.
	 */
	for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
	    offset >= uiop->uio_iov[vec_idx].iov_len;
	    offset -= uiop->uio_iov[vec_idx++].iov_len)
		;
	if (vec_idx == uiop->uio_iovcnt) {
		/*
		 * The caller specified an offset that is larger than
		 * the total size of the buffers it provided.
		 */
		return (CRYPTO_DATA_LEN_RANGE);
	}

	while (vec_idx < uiop->uio_iovcnt && length > 0) {
		cur_len = MIN(uiop->uio_iov[vec_idx].iov_len -
		    offset, length);
		datap = (uchar_t *)(uiop->uio_iov[vec_idx].iov_base +
		    offset);
		switch (cmd) {
		case COPY_FROM_DATA:
			bcopy(datap, buf, cur_len);
			buf += cur_len;
			break;
		case COPY_TO_DATA:
			bcopy(buf, datap, cur_len);
			buf += cur_len;
			break;
		case COMPARE_TO_DATA:
			if (bcmp(datap, buf, cur_len))
				return (CRYPTO_SIGNATURE_INVALID);
			buf += cur_len;
			break;
		case MD5_DIGEST_DATA:
		case SHA1_DIGEST_DATA:
		case SHA2_DIGEST_DATA:
		case GHASH_DATA:
			/* digest commands are not supported in this port */
			return (CRYPTO_ARGUMENTS_BAD);
		default:
			/*
			 * Previously an unrecognized command fell through
			 * the switch, silently skipped the data, and the
			 * function returned CRYPTO_SUCCESS; fail explicitly
			 * instead.
			 */
			return (CRYPTO_ARGUMENTS_BAD);
		}
		length -= cur_len;
		vec_idx++;
		offset = 0;
	}

	if (vec_idx == uiop->uio_iovcnt && length > 0) {
		/*
		 * The end of the specified iovec's was reached but
		 * the length requested could not be processed.
		 */
		switch (cmd) {
		case COPY_TO_DATA:
			data->cd_length = len;
			return (CRYPTO_BUFFER_TOO_SMALL);
		default:
			return (CRYPTO_DATA_LEN_RANGE);
		}
	}

	return (CRYPTO_SUCCESS);
}
/*
 * Copy 'len' bytes from 'buf' into the output crypto_data_t, honoring
 * its format (raw iovec or uio) and cd_offset.
 */
int
crypto_put_output_data(uchar_t *buf, crypto_data_t *output, int len)
{
	uchar_t *dest;

	if (output->cd_format == CRYPTO_DATA_RAW) {
		if (output->cd_raw.iov_len < len) {
			/* Report the size we actually needed. */
			output->cd_length = len;
			return (CRYPTO_BUFFER_TOO_SMALL);
		}
		dest = (uchar_t *)(output->cd_raw.iov_base +
		    output->cd_offset);
		bcopy(buf, dest, len);
		return (CRYPTO_SUCCESS);
	}

	if (output->cd_format == CRYPTO_DATA_UIO)
		return (crypto_uio_data(output, buf, len,
		    COPY_TO_DATA, NULL, NULL));

	return (CRYPTO_ARGUMENTS_BAD);
}
/*
 * Run 'cipher' over a raw (single-iovec) input. An out-of-band IV in
 * cd_miscdata, if present, is installed into the context first.
 */
int
crypto_update_iov(void *ctx, crypto_data_t *input, crypto_data_t *output,
    int (*cipher)(void *, caddr_t, size_t, crypto_data_t *),
    void (*copy_block)(uint8_t *, uint64_t *))
{
	common_ctx_t *cctx = ctx;
	crypto_data_t *outp;

	if (input->cd_miscdata != NULL)
		copy_block((uint8_t *)input->cd_miscdata, &cctx->cc_iv[0]);

	if (input->cd_raw.iov_len < input->cd_length)
		return (CRYPTO_ARGUMENTS_BAD);

	/* A NULL output requests an in-place operation. */
	outp = (input == output) ? NULL : output;
	return ((cipher)(ctx, input->cd_raw.iov_base + input->cd_offset,
	    input->cd_length, outp));
}
/*
 * Run 'cipher' over cd_length bytes of a uio input, starting at
 * cd_offset, one contiguous iovec chunk at a time. An out-of-band IV
 * in cd_miscdata, if present, is installed into the context first.
 *
 * Returns CRYPTO_SUCCESS, CRYPTO_ARGUMENTS_BAD for non-kernel uios,
 * CRYPTO_DATA_LEN_RANGE when offset/length exceed the uio, or the
 * first error returned by 'cipher'.
 */
int
crypto_update_uio(void *ctx, crypto_data_t *input, crypto_data_t *output,
    int (*cipher)(void *, caddr_t, size_t, crypto_data_t *),
    void (*copy_block)(uint8_t *, uint64_t *))
{
	common_ctx_t *common_ctx = ctx;
	uio_t *uiop = input->cd_uio;
	off_t offset = input->cd_offset;
	size_t length = input->cd_length;
	uint_t vec_idx;
	size_t cur_len;
	int rv;

	if (input->cd_miscdata != NULL) {
		copy_block((uint8_t *)input->cd_miscdata,
		    &common_ctx->cc_iv[0]);
	}

	/* Only kernel-space uios are supported. */
	if (input->cd_uio->uio_segflg != UIO_SYSSPACE) {
		return (CRYPTO_ARGUMENTS_BAD);
	}

	/*
	 * Jump to the first iovec containing data to be
	 * processed.
	 */
	for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
	    offset >= uiop->uio_iov[vec_idx].iov_len;
	    offset -= uiop->uio_iov[vec_idx++].iov_len)
		;
	if (vec_idx == uiop->uio_iovcnt) {
		/*
		 * The caller specified an offset that is larger than the
		 * total size of the buffers it provided.
		 */
		return (CRYPTO_DATA_LEN_RANGE);
	}

	/*
	 * Now process the iovecs.
	 */
	while (vec_idx < uiop->uio_iovcnt && length > 0) {
		cur_len = MIN(uiop->uio_iov[vec_idx].iov_len -
		    offset, length);

		/*
		 * Bug fix: propagate cipher failures instead of silently
		 * discarding them (crypto_update_iov() already returns
		 * the cipher's status to the caller).
		 */
		rv = (cipher)(ctx, uiop->uio_iov[vec_idx].iov_base + offset,
		    cur_len, (input == output) ? NULL : output);
		if (rv != CRYPTO_SUCCESS)
			return (rv);

		length -= cur_len;
		vec_idx++;
		offset = 0;
	}

	if (vec_idx == uiop->uio_iovcnt && length > 0) {
		/*
		 * The end of the specified iovec's was reached but
		 * the length requested could not be processed, i.e.
		 * The caller requested to digest more data than it provided.
		 */
		return (CRYPTO_DATA_LEN_RANGE);
	}

	return (CRYPTO_SUCCESS);
}

View File

@ -0,0 +1,638 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* This file is part of the core Kernel Cryptographic Framework.
 * It implements the management of tables of Providers. Entries are
* added and removed when cryptographic providers register with
* and unregister from the framework, respectively. The KCF scheduler
* and ioctl pseudo driver call this function to obtain the list
* of available providers.
*
* The provider table is indexed by crypto_provider_id_t. Each
* element of the table contains a pointer to a provider descriptor,
* or NULL if the entry is free.
*
* This file also implements helper functions to allocate and free
* provider descriptors.
*/
#include <sys/zfs_context.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/sched_impl.h>
#include <sys/crypto/spi.h>
#define	KCF_MAX_PROVIDERS	512	/* max number of providers */

/*
 * Prov_tab is an array of providers which is updated when
 * a crypto provider registers with kcf. The provider calls the
 * SPI routine, crypto_register_provider(), which in turn calls
 * kcf_prov_tab_add_provider().
 *
 * A provider unregisters by calling crypto_unregister_provider()
 * which triggers the removal of the prov_tab entry.
 * It also calls kcf_remove_mech_provider().
 *
 * prov_tab entries are not updated from kcf.conf or by cryptoadm(1M).
 *
 * Note: slot 0 is never assigned; provider ids start at index 1
 * (see kcf_prov_tab_add_provider()).
 */
static kcf_provider_desc_t **prov_tab = NULL;
static kmutex_t prov_tab_mutex;	/* ensure exclusive access to the table */
static uint_t prov_tab_num = 0;	/* number of providers in table */
static uint_t prov_tab_max = KCF_MAX_PROVIDERS;
void
kcf_prov_tab_destroy(void)
{
if (prov_tab) kmem_free(prov_tab, prov_tab_max *
sizeof (kcf_provider_desc_t *));
}
/*
* Initialize a mutex and the KCF providers table, prov_tab.
* The providers table is dynamically allocated with prov_tab_max entries.
* Called from kcf module _init().
*/
void
kcf_prov_tab_init(void)
{
	/* Zeroed table: every slot starts out free (NULL). */
	prov_tab = kmem_zalloc(prov_tab_max * sizeof (kcf_provider_desc_t *),
	    KM_SLEEP);
	mutex_init(&prov_tab_mutex, NULL, MUTEX_DEFAULT, NULL);
}
/*
* Add a provider to the provider table. If no free entry can be found
* for the new provider, returns CRYPTO_HOST_MEMORY. Otherwise, add
* the provider to the table, initialize the pd_prov_id field
* of the specified provider descriptor to the index in that table,
* and return CRYPTO_SUCCESS. Note that a REFHOLD is done on the
* provider when pointed to by a table entry.
*/
int
kcf_prov_tab_add_provider(kcf_provider_desc_t *prov_desc)
{
	uint_t i;

	ASSERT(prov_tab != NULL);

	mutex_enter(&prov_tab_mutex);

	/* find free slot in providers table (slot 0 is never assigned) */
	for (i = 1; i < KCF_MAX_PROVIDERS && prov_tab[i] != NULL; i++)
		;
	if (i == KCF_MAX_PROVIDERS) {
		/* ran out of providers entries */
		mutex_exit(&prov_tab_mutex);
		cmn_err(CE_WARN, "out of providers entries");
		return (CRYPTO_HOST_MEMORY);
	}

	/* initialize entry; table holds both a regular and internal ref */
	prov_tab[i] = prov_desc;
	KCF_PROV_REFHOLD(prov_desc);
	KCF_PROV_IREFHOLD(prov_desc);
	prov_tab_num++;

	mutex_exit(&prov_tab_mutex);

	/* update provider descriptor; the slot index doubles as its id */
	prov_desc->pd_prov_id = i;

	/*
	 * The KCF-private provider handle is defined as the internal
	 * provider id.
	 */
	prov_desc->pd_kcf_prov_handle =
	    (crypto_kcf_provider_handle_t)prov_desc->pd_prov_id;

	return (CRYPTO_SUCCESS);
}
/*
* Remove the provider specified by its id. A REFRELE is done on the
* corresponding provider descriptor before this function returns.
 * Returns CRYPTO_INVALID_PROVIDER_ID if the provider id is not valid.
*/
int
kcf_prov_tab_rem_provider(crypto_provider_id_t prov_id)
{
	kcf_provider_desc_t *prov_desc;

	ASSERT(prov_tab != NULL);
	/* NOTE(review): prov_tab_num is unsigned, so this is always true. */
	ASSERT(prov_tab_num >= 0);

	/*
	 * Validate provider id, since it can be specified by a 3rd-party
	 * provider.
	 */
	mutex_enter(&prov_tab_mutex);
	if (prov_id >= KCF_MAX_PROVIDERS ||
	    ((prov_desc = prov_tab[prov_id]) == NULL)) {
		mutex_exit(&prov_tab_mutex);
		return (CRYPTO_INVALID_PROVIDER_ID);
	}
	mutex_exit(&prov_tab_mutex);

	/*
	 * The provider id must remain valid until the associated provider
	 * descriptor is freed. For this reason, we simply release our
	 * reference to the descriptor here. When the reference count
	 * reaches zero, kcf_free_provider_desc() will be invoked and
	 * the associated entry in the providers table will be released
	 * at that time.
	 */
	KCF_PROV_REFRELE(prov_desc);
	KCF_PROV_IREFRELE(prov_desc);

	return (CRYPTO_SUCCESS);
}
/*
* Returns the provider descriptor corresponding to the specified
* provider id. A REFHOLD is done on the descriptor before it is
* returned to the caller. It is the responsibility of the caller
* to do a REFRELE once it is done with the provider descriptor.
*/
kcf_provider_desc_t *
kcf_prov_tab_lookup(crypto_provider_id_t prov_id)
{
	kcf_provider_desc_t *prov_desc;

	/*
	 * Bug fix: validate the id before indexing, mirroring
	 * kcf_prov_tab_rem_provider(); a stale or bogus id would
	 * otherwise read past the end of prov_tab.
	 */
	if (prov_id >= KCF_MAX_PROVIDERS)
		return (NULL);

	mutex_enter(&prov_tab_mutex);

	prov_desc = prov_tab[prov_id];

	if (prov_desc == NULL) {
		mutex_exit(&prov_tab_mutex);
		return (NULL);
	}

	/* Hold the descriptor for the caller; caller must REFRELE. */
	KCF_PROV_REFHOLD(prov_desc);

	mutex_exit(&prov_tab_mutex);

	return (prov_desc);
}
/*
 * Allocate KCF-owned copies of every SPI v1 ops sub-vector the
 * provider supplied. Sub-vectors the provider left NULL stay NULL.
 */
static void
allocate_ops_v1(crypto_ops_t *src, crypto_ops_t *dst, uint_t *mech_list_count)
{
#define	KCF_ALLOC_OPS(field, type)					\
	do {								\
		if (src->field != NULL)					\
			dst->field = kmem_alloc(sizeof (type), KM_SLEEP); \
	} while (0)

	KCF_ALLOC_OPS(co_control_ops, crypto_control_ops_t);
	KCF_ALLOC_OPS(co_digest_ops, crypto_digest_ops_t);
	KCF_ALLOC_OPS(co_cipher_ops, crypto_cipher_ops_t);
	KCF_ALLOC_OPS(co_mac_ops, crypto_mac_ops_t);
	KCF_ALLOC_OPS(co_sign_ops, crypto_sign_ops_t);
	KCF_ALLOC_OPS(co_verify_ops, crypto_verify_ops_t);
	KCF_ALLOC_OPS(co_dual_ops, crypto_dual_ops_t);
	KCF_ALLOC_OPS(co_dual_cipher_mac_ops, crypto_dual_cipher_mac_ops_t);

	if (src->co_random_ops != NULL) {
		dst->co_random_ops = kmem_alloc(
		    sizeof (crypto_random_number_ops_t), KM_SLEEP);

		/*
		 * Allocate storage to store the array of supported mechanisms
		 * specified by provider. We allocate extra mechanism storage
		 * if the provider has random_ops since we keep an internal
		 * mechanism, SUN_RANDOM, in this case.
		 */
		(*mech_list_count)++;
	}

	KCF_ALLOC_OPS(co_session_ops, crypto_session_ops_t);
	KCF_ALLOC_OPS(co_object_ops, crypto_object_ops_t);
	KCF_ALLOC_OPS(co_key_ops, crypto_key_ops_t);
	KCF_ALLOC_OPS(co_provider_ops, crypto_provider_management_ops_t);
	KCF_ALLOC_OPS(co_ctx_ops, crypto_ctx_ops_t);

#undef	KCF_ALLOC_OPS
}
/* Allocate the ops sub-vector introduced by SPI v2, if supplied. */
static void
allocate_ops_v2(crypto_ops_t *src, crypto_ops_t *dst)
{
	if (src->co_mech_ops == NULL)
		return;

	dst->co_mech_ops = kmem_alloc(sizeof (crypto_mech_ops_t), KM_SLEEP);
}
/* Allocate the ops sub-vector introduced by SPI v3, if supplied. */
static void
allocate_ops_v3(crypto_ops_t *src, crypto_ops_t *dst)
{
	if (src->co_nostore_key_ops == NULL)
		return;

	dst->co_nostore_key_ops = kmem_alloc(
	    sizeof (crypto_nostore_key_ops_t), KM_SLEEP);
}
/*
* Allocate a provider descriptor. mech_list_count specifies the
* number of mechanisms supported by the providers, and is used
* to allocate storage for the mechanism table.
* This function may sleep while allocating memory, which is OK
* since it is invoked from user context during provider registration.
*/
kcf_provider_desc_t *
kcf_alloc_provider_desc(crypto_provider_info_t *info)
{
	int i, j;
	kcf_provider_desc_t *desc;
	uint_t mech_list_count = info->pi_mech_list_count;
	crypto_ops_t *src_ops = info->pi_ops_vector;

	desc = kmem_zalloc(sizeof (kcf_provider_desc_t), KM_SLEEP);

	/*
	 * pd_description serves two purposes
	 * - Appears as a blank padded PKCS#11 style string, that will be
	 *   returned to applications in CK_SLOT_INFO.slotDescription.
	 *   This means that we should not have a null character in the
	 *   first CRYPTO_PROVIDER_DESCR_MAX_LEN bytes.
	 * - Appears as a null-terminated string that can be used by
	 *   other kcf routines.
	 *
	 * So, we allocate enough room for one extra null terminator
	 * which keeps every one happy.
	 */
	desc->pd_description = kmem_alloc(CRYPTO_PROVIDER_DESCR_MAX_LEN + 1,
	    KM_SLEEP);
	(void) memset(desc->pd_description, ' ',
	    CRYPTO_PROVIDER_DESCR_MAX_LEN);
	desc->pd_description[CRYPTO_PROVIDER_DESCR_MAX_LEN] = '\0';

	/*
	 * Since the framework does not require the ops vector specified
	 * by the providers during registration to be persistent,
	 * KCF needs to allocate storage where copies of the ops
	 * vectors are copied.
	 */
	desc->pd_ops_vector = kmem_zalloc(sizeof (crypto_ops_t), KM_SLEEP);

	/*
	 * Logical providers have no ops of their own; for the rest,
	 * allocate sub-vectors for every SPI version the provider speaks.
	 * Note: allocate_ops_v1() may bump mech_list_count (SUN_RANDOM).
	 */
	if (info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER) {
		allocate_ops_v1(src_ops, desc->pd_ops_vector, &mech_list_count);
		if (info->pi_interface_version >= CRYPTO_SPI_VERSION_2)
			allocate_ops_v2(src_ops, desc->pd_ops_vector);
		if (info->pi_interface_version == CRYPTO_SPI_VERSION_3)
			allocate_ops_v3(src_ops, desc->pd_ops_vector);
	}

	desc->pd_mech_list_count = mech_list_count;
	desc->pd_mechanisms = kmem_zalloc(sizeof (crypto_mech_info_t) *
	    mech_list_count, KM_SLEEP);
	/* No mechanism is mapped yet; mark every index slot invalid. */
	for (i = 0; i < KCF_OPS_CLASSSIZE; i++)
		for (j = 0; j < KCF_MAXMECHTAB; j++)
			desc->pd_mech_indx[i][j] = KCF_INVALID_INDX;

	desc->pd_prov_id = KCF_PROVID_INVALID;
	desc->pd_state = KCF_PROV_ALLOCATED;

	mutex_init(&desc->pd_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&desc->pd_resume_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&desc->pd_remove_cv, NULL, CV_DEFAULT, NULL);

	return (desc);
}
/*
* Called by KCF_PROV_REFRELE when a provider's reference count drops
* to zero. We free the descriptor when the last reference is released.
* However, for software providers, we do not free it when there is an
* unregister thread waiting. We signal that thread in this case and
* that thread is responsible for freeing the descriptor.
*/
void
kcf_provider_zero_refcnt(kcf_provider_desc_t *desc)
{
	mutex_enter(&desc->pd_lock);
	switch (desc->pd_prov_type) {
	case CRYPTO_SW_PROVIDER:
		/*
		 * An unregister thread is waiting on pd_remove_cv; wake
		 * it and let it free the descriptor instead of us.
		 */
		if (desc->pd_state == KCF_PROV_REMOVED ||
		    desc->pd_state == KCF_PROV_DISABLED) {
			desc->pd_state = KCF_PROV_FREED;
			cv_broadcast(&desc->pd_remove_cv);
			mutex_exit(&desc->pd_lock);
			break;
		}
		/* FALLTHRU */
	case CRYPTO_HW_PROVIDER:
	case CRYPTO_LOGICAL_PROVIDER:
		/*
		 * NOTE(review): no default case -- an out-of-range
		 * pd_prov_type would leave pd_lock held; presumably the
		 * enum is exhaustive here.
		 */
		mutex_exit(&desc->pd_lock);
		kcf_free_provider_desc(desc);
	}
}
/*
* Free a provider descriptor.
*/
void
kcf_free_provider_desc(kcf_provider_desc_t *desc)
{
	if (desc == NULL)
		return;

	mutex_enter(&prov_tab_mutex);
	if (desc->pd_prov_id != KCF_PROVID_INVALID) {
		/* release the associated providers table entry */
		ASSERT(prov_tab[desc->pd_prov_id] != NULL);
		prov_tab[desc->pd_prov_id] = NULL;
		prov_tab_num--;
	}
	mutex_exit(&prov_tab_mutex);

	/* free the kernel memory associated with the provider descriptor */

	if (desc->pd_description != NULL)
		kmem_free(desc->pd_description,
		    CRYPTO_PROVIDER_DESCR_MAX_LEN + 1);

	/*
	 * Free every ops sub-vector that allocate_ops_v1/v2/v3()
	 * allocated; NULL sub-vectors were never allocated.
	 */
	if (desc->pd_ops_vector != NULL) {

		if (desc->pd_ops_vector->co_control_ops != NULL)
			kmem_free(desc->pd_ops_vector->co_control_ops,
			    sizeof (crypto_control_ops_t));

		if (desc->pd_ops_vector->co_digest_ops != NULL)
			kmem_free(desc->pd_ops_vector->co_digest_ops,
			    sizeof (crypto_digest_ops_t));

		if (desc->pd_ops_vector->co_cipher_ops != NULL)
			kmem_free(desc->pd_ops_vector->co_cipher_ops,
			    sizeof (crypto_cipher_ops_t));

		if (desc->pd_ops_vector->co_mac_ops != NULL)
			kmem_free(desc->pd_ops_vector->co_mac_ops,
			    sizeof (crypto_mac_ops_t));

		if (desc->pd_ops_vector->co_sign_ops != NULL)
			kmem_free(desc->pd_ops_vector->co_sign_ops,
			    sizeof (crypto_sign_ops_t));

		if (desc->pd_ops_vector->co_verify_ops != NULL)
			kmem_free(desc->pd_ops_vector->co_verify_ops,
			    sizeof (crypto_verify_ops_t));

		if (desc->pd_ops_vector->co_dual_ops != NULL)
			kmem_free(desc->pd_ops_vector->co_dual_ops,
			    sizeof (crypto_dual_ops_t));

		if (desc->pd_ops_vector->co_dual_cipher_mac_ops != NULL)
			kmem_free(desc->pd_ops_vector->co_dual_cipher_mac_ops,
			    sizeof (crypto_dual_cipher_mac_ops_t));

		if (desc->pd_ops_vector->co_random_ops != NULL)
			kmem_free(desc->pd_ops_vector->co_random_ops,
			    sizeof (crypto_random_number_ops_t));

		if (desc->pd_ops_vector->co_session_ops != NULL)
			kmem_free(desc->pd_ops_vector->co_session_ops,
			    sizeof (crypto_session_ops_t));

		if (desc->pd_ops_vector->co_object_ops != NULL)
			kmem_free(desc->pd_ops_vector->co_object_ops,
			    sizeof (crypto_object_ops_t));

		if (desc->pd_ops_vector->co_key_ops != NULL)
			kmem_free(desc->pd_ops_vector->co_key_ops,
			    sizeof (crypto_key_ops_t));

		if (desc->pd_ops_vector->co_provider_ops != NULL)
			kmem_free(desc->pd_ops_vector->co_provider_ops,
			    sizeof (crypto_provider_management_ops_t));

		if (desc->pd_ops_vector->co_ctx_ops != NULL)
			kmem_free(desc->pd_ops_vector->co_ctx_ops,
			    sizeof (crypto_ctx_ops_t));

		if (desc->pd_ops_vector->co_mech_ops != NULL)
			kmem_free(desc->pd_ops_vector->co_mech_ops,
			    sizeof (crypto_mech_ops_t));

		if (desc->pd_ops_vector->co_nostore_key_ops != NULL)
			kmem_free(desc->pd_ops_vector->co_nostore_key_ops,
			    sizeof (crypto_nostore_key_ops_t));

		kmem_free(desc->pd_ops_vector, sizeof (crypto_ops_t));
	}

	if (desc->pd_mechanisms != NULL)
		/* free the memory associated with the mechanism info's */
		kmem_free(desc->pd_mechanisms, sizeof (crypto_mech_info_t) *
		    desc->pd_mech_list_count);

	if (desc->pd_sched_info.ks_taskq != NULL)
		taskq_destroy(desc->pd_sched_info.ks_taskq);

	kmem_free(desc, sizeof (kcf_provider_desc_t));
}
/*
* Returns an array of hardware and logical provider descriptors,
* a.k.a the PKCS#11 slot list. A REFHOLD is done on each descriptor
* before the array is returned. The entire table can be freed by
* calling kcf_free_provider_tab().
*/
int
kcf_get_slot_list(uint_t *count, kcf_provider_desc_t ***array,
    boolean_t unverified)
{
	kcf_provider_desc_t *prov_desc;
	kcf_provider_desc_t **p = NULL;
	char *last;
	uint_t cnt = 0;
	uint_t i, j;
	int rval = CRYPTO_SUCCESS;
	size_t n, final_size;

	/* count the providers */
	mutex_enter(&prov_tab_mutex);
	for (i = 0; i < KCF_MAX_PROVIDERS; i++) {
		if ((prov_desc = prov_tab[i]) != NULL &&
		    ((prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER &&
		    (prov_desc->pd_flags & CRYPTO_HIDE_PROVIDER) == 0) ||
		    prov_desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)) {
			if (KCF_IS_PROV_USABLE(prov_desc) ||
			    (unverified && KCF_IS_PROV_UNVERIFIED(prov_desc))) {
				cnt++;
			}
		}
	}
	mutex_exit(&prov_tab_mutex);

	if (cnt == 0)
		goto out;

	n = cnt * sizeof (kcf_provider_desc_t *);
again:
	/*
	 * The mutex was dropped between counting and filling, so more
	 * providers may have appeared. If the array overflows, double
	 * it and retry.
	 */
	p = kmem_zalloc(n, KM_SLEEP);

	/* pointer to last entry in the array */
	last = (char *)&p[cnt-1];

	mutex_enter(&prov_tab_mutex);
	/* fill the slot list */
	for (i = 0, j = 0; i < KCF_MAX_PROVIDERS; i++) {
		if ((prov_desc = prov_tab[i]) != NULL &&
		    ((prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER &&
		    (prov_desc->pd_flags & CRYPTO_HIDE_PROVIDER) == 0) ||
		    prov_desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)) {
			if (KCF_IS_PROV_USABLE(prov_desc) ||
			    (unverified && KCF_IS_PROV_UNVERIFIED(prov_desc))) {
				if ((char *)&p[j] > last) {
					/* array overflow: grow and retry */
					mutex_exit(&prov_tab_mutex);
					kcf_free_provider_tab(cnt, p);
					n = n << 1;
					cnt = cnt << 1;
					goto again;
				}
				p[j++] = prov_desc;
				KCF_PROV_REFHOLD(prov_desc);
			}
		}
	}
	mutex_exit(&prov_tab_mutex);

	final_size = j * sizeof (kcf_provider_desc_t *);
	cnt = j;
	ASSERT(final_size <= n);

	/* check if buffer we allocated is too large; shrink to fit */
	if (final_size < n) {
		char *final_buffer = NULL;

		if (final_size > 0) {
			final_buffer = kmem_alloc(final_size, KM_SLEEP);
			bcopy(p, final_buffer, final_size);
		}
		kmem_free(p, n);
		p = (kcf_provider_desc_t **)final_buffer;
	}

out:
	*count = cnt;
	*array = p;
	return (rval);
}
/*
* Free an array of hardware provider descriptors. A REFRELE
* is done on each descriptor before the table is freed.
*/
/*
 * Release the hold kcf_get_slot_list() placed on each descriptor,
 * then free the array itself.
 */
void
kcf_free_provider_tab(uint_t count, kcf_provider_desc_t **array)
{
	int i;

	for (i = 0; i < count; i++) {
		if (array[i] != NULL)
			KCF_PROV_REFRELE(array[i]);
	}

	kmem_free(array, count * sizeof (kcf_provider_desc_t *));
}
/*
* Returns in the location pointed to by pd a pointer to the descriptor
* for the software provider for the specified mechanism.
* The provider descriptor is returned held and it is the caller's
* responsibility to release it when done. The mechanism entry
* is returned if the optional argument mep is non NULL.
*
* Returns one of the CRYPTO_ * error codes on failure, and
* CRYPTO_SUCCESS on success.
*/
int
kcf_get_sw_prov(crypto_mech_type_t mech_type, kcf_provider_desc_t **pd,
    kcf_mech_entry_t **mep, boolean_t log_warn)
{
	kcf_mech_entry_t *me;

	/* get the mechanism entry for this mechanism */
	if (kcf_get_mech_entry(mech_type, &me) != KCF_SUCCESS)
		return (CRYPTO_MECHANISM_INVALID);

	/*
	 * Get the software provider for this mechanism.
	 * Lock the mech_entry until we grab the 'pd'.
	 */
	mutex_enter(&me->me_mutex);

	if (me->me_sw_prov == NULL ||
	    (*pd = me->me_sw_prov->pm_prov_desc) == NULL) {
		/* no SW provider for this mechanism */
		if (log_warn)
			cmn_err(CE_WARN, "no SW provider for \"%s\"\n",
			    me->me_name);
		mutex_exit(&me->me_mutex);
		return (CRYPTO_MECH_NOT_SUPPORTED);
	}

	/* hold for the caller; caller must REFRELE when done */
	KCF_PROV_REFHOLD(*pd);
	mutex_exit(&me->me_mutex);

	if (mep != NULL)
		*mep = me;

	return (CRYPTO_SUCCESS);
}

1763
module/icp/core/kcf_sched.c Normal file

File diff suppressed because it is too large Load Diff

152
module/icp/illumos-crypto.c Normal file
View File

@ -0,0 +1,152 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2016, Datto, Inc. All rights reserved.
*/
#ifdef _KERNEL
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#else
#define __exit
#define __init
#endif
#include <sys/crypto/common.h>
#include <sys/crypto/api.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/sched_impl.h>
#include <sys/modhash_impl.h>
#include <sys/crypto/icp.h>
/*
* Changes made to the original Illumos Crypto Layer for the ICP:
*
* Several changes were needed to allow the Illumos Crypto Layer
* to work in the Linux kernel. Almost all of the changes fall into
* one of the following categories:
*
* 1) Moving the syntax to the C90: This was mostly a matter of
* changing func() definitions to func(void). In a few cases,
* initializations of structs with unions needed to have brackets
* added.
*
* 2) Changes to allow userspace compilation: The ICP is meant to be
* compiled and used in both userspace and kernel space (for ztest and
* libzfs), so the _KERNEL macros did not make sense anymore. For the
* same reason, many header includes were also changed to use
* sys/zfs_context.h
*
* 3) Moving to a statically compiled architecture: At some point in
* the future it may make sense to have encryption algorithms that are
* loadable into the ICP at runtime via separate kernel modules.
* However, considering that this code will probably not see much use
* outside of zfs and zfs encryption only requires aes and sha256
* algorithms it seemed like more trouble than it was worth to port over
* Illumos's kernel module structure to a Linux kernel module. In
* addition, The Illumos code related to keeping track of kernel modules
* is very much tied to the Illumos OS and proved difficult to port to
* Linux. Therefore, the structure of the ICP was simplified to work
* statically and several pieces of code responsible for keeping track
* of Illumos kernel modules were removed and simplified. All module
* initialization and destruction is now called in this file during
* Linux kernel module loading and unloading.
*
* 4) Adding destructors: The Illumos Crypto Layer is built into
* the Illumos kernel and is not meant to be unloaded. Some destructors
* were added to allow the ICP to be unloaded without leaking
* structures.
*
* 5) Removing CRYPTO_DATA_MBLK related structures and code:
* crypto_data_t can have 3 formats, CRYPTO_DATA_RAW, CRYPTO_DATA_UIO,
* and CRYPTO_DATA_MBLK. ZFS only requires the first 2 formats, as the
* last one is related to streamed data. To simplify the port, code
* related to this format was removed.
*
* 6) Changes for architecture specific code: Some changes were needed
* to make architecture specific assembly compile. The biggest change
* here was to functions related to detecting CPU capabilities for amd64.
 * The Illumos Crypto Layer used to call into the Illumos kernel's API
* to discover these. They have been converted to instead use the
* 'cpuid' instruction as per the Intel spec. In addition, references to
 * the sun4u and sparc architectures have been removed so that these
* will use the generic implementation.
*
* 7) Removing sha384 and sha512 code: The sha code was actually very
 * easy to port. However, the generic sha384 and sha512 code actually
* exceeds the stack size on arm and powerpc architectures. In an effort
* to remove warnings, this code was removed.
*
* 8) Change large allocations from kmem_alloc() to vmem_alloc(): In
* testing the ICP with the ZFS encryption code, a few allocations were
* found that could potentially be very large. These caused the SPL to
* throw warnings and so they were changed to use vmem_alloc().
*
* 9) Makefiles: Makefiles were added that would work with the existing
* ZFS Makefiles.
*/
/* Module unload hook: tear down the ICP in reverse order of icp_init(). */
void __exit
icp_fini(void)
{
	/* destroy the algorithm modules first... */
	sha2_mod_fini();
	sha1_mod_fini();
	aes_mod_fini();

	/* ...then the framework infrastructure they registered with */
	kcf_sched_destroy();
	kcf_prov_tab_destroy();
	kcf_destroy_mech_tabs();
	mod_hash_fini();
}
/*
 * Module load hook; roughly equivalent to kcf.c: _init().
 * Brings up the framework infrastructure, then registers the
 * statically-compiled algorithm providers. Always returns 0.
 */
int __init
icp_init(void)
{
	/* initialize the mod hash module */
	mod_hash_init();

	/* initialize the mechanisms tables supported out-of-the-box */
	kcf_init_mech_tabs();

	/* initialize the providers tables */
	kcf_prov_tab_init();

	/*
	 * Initialize scheduling structures. Note that this does NOT
	 * start any threads since it might not be safe to do so.
	 */
	kcf_sched_init();

	/* initialize algorithms */
	aes_mod_init();
	sha1_mod_init();
	sha2_mod_init();

	return (0);
}
#if defined(_KERNEL) && defined(HAVE_SPL)
module_exit(icp_fini);
module_init(icp_init);
MODULE_LICENSE("CDDL");
#endif

View File

@ -0,0 +1,170 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _AES_IMPL_H
#define _AES_IMPL_H
/*
* Common definitions used by AES.
*/
#ifdef __cplusplus
extern "C" {
#endif
#include <sys/zfs_context.h>
#include <sys/crypto/common.h>
/* Similar to sysmacros.h IS_P2ALIGNED, but checks two pointers: */
#define IS_P2ALIGNED2(v, w, a) \
((((uintptr_t)(v) | (uintptr_t)(w)) & ((uintptr_t)(a) - 1)) == 0)
#define AES_BLOCK_LEN 16 /* bytes */
/* Round constant length, in number of 32-bit elements: */
#define RC_LENGTH (5 * ((AES_BLOCK_LEN) / 4 - 2))
/*
 * Copy one 16-byte AES block byte-by-byte (no alignment requirement).
 * NOTE(review): multi-statement macro, not do/while-wrapped -- callers
 * must not use it as the body of an unbraced if/else.
 */
#define	AES_COPY_BLOCK(src, dst) \
	(dst)[0] = (src)[0]; \
	(dst)[1] = (src)[1]; \
	(dst)[2] = (src)[2]; \
	(dst)[3] = (src)[3]; \
	(dst)[4] = (src)[4]; \
	(dst)[5] = (src)[5]; \
	(dst)[6] = (src)[6]; \
	(dst)[7] = (src)[7]; \
	(dst)[8] = (src)[8]; \
	(dst)[9] = (src)[9]; \
	(dst)[10] = (src)[10]; \
	(dst)[11] = (src)[11]; \
	(dst)[12] = (src)[12]; \
	(dst)[13] = (src)[13]; \
	(dst)[14] = (src)[14]; \
	(dst)[15] = (src)[15]

/*
 * XOR one 16-byte AES block into dst, byte-by-byte. Same unbraced
 * if/else caveat as AES_COPY_BLOCK above.
 */
#define	AES_XOR_BLOCK(src, dst) \
	(dst)[0] ^= (src)[0]; \
	(dst)[1] ^= (src)[1]; \
	(dst)[2] ^= (src)[2]; \
	(dst)[3] ^= (src)[3]; \
	(dst)[4] ^= (src)[4]; \
	(dst)[5] ^= (src)[5]; \
	(dst)[6] ^= (src)[6]; \
	(dst)[7] ^= (src)[7]; \
	(dst)[8] ^= (src)[8]; \
	(dst)[9] ^= (src)[9]; \
	(dst)[10] ^= (src)[10]; \
	(dst)[11] ^= (src)[11]; \
	(dst)[12] ^= (src)[12]; \
	(dst)[13] ^= (src)[13]; \
	(dst)[14] ^= (src)[14]; \
	(dst)[15] ^= (src)[15]
/* AES key size definitions */
#define AES_MINBITS 128
#define AES_MINBYTES ((AES_MINBITS) >> 3)
#define AES_MAXBITS 256
#define AES_MAXBYTES ((AES_MAXBITS) >> 3)
#define AES_MIN_KEY_BYTES ((AES_MINBITS) >> 3)
#define AES_MAX_KEY_BYTES ((AES_MAXBITS) >> 3)
#define AES_192_KEY_BYTES 24
#define AES_IV_LEN 16
/* AES key schedule may be implemented with 32- or 64-bit elements: */
#define AES_32BIT_KS 32
#define AES_64BIT_KS 64
#define MAX_AES_NR 14 /* Maximum number of rounds */
#define MAX_AES_NB 4 /* Number of columns comprising a state */
/*
 * AES key schedule storage: 64-bit words on sun4u, 32-bit words
 * elsewhere (the two layouts share the same storage via the union).
 */
typedef union {
#ifdef	sun4u
	uint64_t	ks64[((MAX_AES_NR) + 1) * (MAX_AES_NB)];
#endif
	uint32_t	ks32[((MAX_AES_NR) + 1) * (MAX_AES_NB)];
} aes_ks_t;

/* aes_key.flags value: */
#define	INTEL_AES_NI_CAPABLE	0x1	/* AES-NI instructions present */

typedef struct aes_key aes_key_t;
/* Expanded AES key: both directions' schedules plus implementation info. */
struct aes_key {
	aes_ks_t	encr_ks;  /* encryption key schedule */
	aes_ks_t	decr_ks;  /* decryption key schedule */
#ifdef	__amd64
	long double	align128; /* Align fields above for Intel AES-NI */
	int		flags;	/* implementation-dependent flags */
#endif	/* __amd64 */
	int	nr;	/* number of rounds (10, 12, or 14) */
	int	type;	/* key schedule size (32 or 64 bits) */
};
/*
* Core AES functions.
* ks and keysched are pointers to aes_key_t.
* They are declared void* as they are intended to be opaque types.
* Use function aes_alloc_keysched() to allocate memory for ks and keysched.
*/
extern void *aes_alloc_keysched(size_t *size, int kmflag);
extern void aes_init_keysched(const uint8_t *cipherKey, uint_t keyBits,
void *keysched);
extern int aes_encrypt_block(const void *ks, const uint8_t *pt, uint8_t *ct);
extern int aes_decrypt_block(const void *ks, const uint8_t *ct, uint8_t *pt);
/*
* AES mode functions.
* The first 2 functions operate on 16-byte AES blocks.
*/
extern void aes_copy_block(uint8_t *in, uint8_t *out);
extern void aes_xor_block(uint8_t *data, uint8_t *dst);
/* Note: ctx is a pointer to aes_ctx_t defined in modes.h */
extern int aes_encrypt_contiguous_blocks(void *ctx, char *data, size_t length,
crypto_data_t *out);
extern int aes_decrypt_contiguous_blocks(void *ctx, char *data, size_t length,
crypto_data_t *out);
/*
* The following definitions and declarations are only used by AES FIPS POST
*/
#ifdef _AES_IMPL
/*
 * AES mechanism identifiers.  Only visible to the implementation
 * (guarded by _AES_IMPL; used by AES FIPS POST per the comment above).
 */
typedef enum aes_mech_type {
AES_ECB_MECH_INFO_TYPE, /* SUN_CKM_AES_ECB */
AES_CBC_MECH_INFO_TYPE, /* SUN_CKM_AES_CBC */
AES_CBC_PAD_MECH_INFO_TYPE, /* SUN_CKM_AES_CBC_PAD */
AES_CTR_MECH_INFO_TYPE, /* SUN_CKM_AES_CTR */
AES_CCM_MECH_INFO_TYPE, /* SUN_CKM_AES_CCM */
AES_GCM_MECH_INFO_TYPE, /* SUN_CKM_AES_GCM */
AES_GMAC_MECH_INFO_TYPE /* SUN_CKM_AES_GMAC */
} aes_mech_type_t;
#endif /* _AES_IMPL */
#ifdef __cplusplus
}
#endif
#endif /* _AES_IMPL_H */

View File

@ -0,0 +1,385 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _COMMON_CRYPTO_MODES_H
#define _COMMON_CRYPTO_MODES_H
#ifdef __cplusplus
extern "C" {
#endif
#include <sys/zfs_context.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#define ECB_MODE 0x00000002
#define CBC_MODE 0x00000004
#define CTR_MODE 0x00000008
#define CCM_MODE 0x00000010
#define GCM_MODE 0x00000020
#define GMAC_MODE 0x00000040
/*
* cc_keysched: Pointer to key schedule.
*
* cc_keysched_len: Length of the key schedule.
*
* cc_remainder: This is for residual data, i.e. data that can't
* be processed because there are too few bytes.
* Must wait until more data arrives.
*
* cc_remainder_len: Number of bytes in cc_remainder.
*
* cc_iv: Scratch buffer that sometimes contains the IV.
*
* cc_lastp: Pointer to previous block of ciphertext.
*
* cc_copy_to: Pointer to where encrypted residual data needs
* to be copied.
*
* cc_flags: PROVIDER_OWNS_KEY_SCHEDULE
* When a context is freed, it is necessary
* to know whether the key schedule was allocated
* by the caller, or internally, e.g. an init routine.
* If allocated by the latter, then it needs to be freed.
*
* ECB_MODE, CBC_MODE, CTR_MODE, or CCM_MODE
*/
/*
 * State shared by every cipher-mode context; each mode context embeds
 * this as its first member.  Per-field documentation is in the block
 * comment above.
 */
struct common_ctx {
void *cc_keysched; /* key schedule (may be provider-owned) */
size_t cc_keysched_len; /* length of cc_keysched */
uint64_t cc_iv[2]; /* scratch buffer, sometimes holds the IV */
uint64_t cc_remainder[2]; /* residual input awaiting a full block */
size_t cc_remainder_len; /* bytes currently in cc_remainder */
uint8_t *cc_lastp; /* previous block of ciphertext */
uint8_t *cc_copy_to; /* where encrypted residual data is copied */
uint32_t cc_flags; /* mode bit and PROVIDER_OWNS_KEY_SCHEDULE */
};
typedef struct common_ctx common_ctx_t;
/* ECB mode context: common state plus storage for the last block. */
typedef struct ecb_ctx {
struct common_ctx ecb_common;
uint64_t ecb_lastblock[2];
} ecb_ctx_t;
/* Shorthands for reaching the embedded common-context fields. */
#define ecb_keysched ecb_common.cc_keysched
#define ecb_keysched_len ecb_common.cc_keysched_len
#define ecb_iv ecb_common.cc_iv
#define ecb_remainder ecb_common.cc_remainder
#define ecb_remainder_len ecb_common.cc_remainder_len
#define ecb_lastp ecb_common.cc_lastp
#define ecb_copy_to ecb_common.cc_copy_to
#define ecb_flags ecb_common.cc_flags
/* CBC mode context: common state plus storage for the last block. */
typedef struct cbc_ctx {
struct common_ctx cbc_common;
uint64_t cbc_lastblock[2];
} cbc_ctx_t;
/* Shorthands for reaching the embedded common-context fields. */
#define cbc_keysched cbc_common.cc_keysched
#define cbc_keysched_len cbc_common.cc_keysched_len
#define cbc_iv cbc_common.cc_iv
#define cbc_remainder cbc_common.cc_remainder
#define cbc_remainder_len cbc_common.cc_remainder_len
#define cbc_lastp cbc_common.cc_lastp
#define cbc_copy_to cbc_common.cc_copy_to
#define cbc_flags cbc_common.cc_flags
/*
* ctr_lower_mask Bit-mask for lower 8 bytes of counter block.
* ctr_upper_mask Bit-mask for upper 8 bytes of counter block.
*/
/* CTR mode context; the counter masks are documented just above. */
typedef struct ctr_ctx {
struct common_ctx ctr_common;
uint64_t ctr_lower_mask; /* bit-mask for lower 8 bytes of counter block */
uint64_t ctr_upper_mask; /* bit-mask for upper 8 bytes of counter block */
uint32_t ctr_tmp[4]; /* scratch block — presumably keystream temp; confirm */
} ctr_ctx_t;
/*
 * ctr_cb Counter block.
 */
#define ctr_keysched ctr_common.cc_keysched
#define ctr_keysched_len ctr_common.cc_keysched_len
#define ctr_cb ctr_common.cc_iv
#define ctr_remainder ctr_common.cc_remainder
#define ctr_remainder_len ctr_common.cc_remainder_len
#define ctr_lastp ctr_common.cc_lastp
#define ctr_copy_to ctr_common.cc_copy_to
#define ctr_flags ctr_common.cc_flags
/*
*
* ccm_mac_len: Stores length of the MAC in CCM mode.
* ccm_mac_buf: Stores the intermediate value for MAC in CCM encrypt.
* In CCM decrypt, stores the input MAC value.
* ccm_data_len: Length of the plaintext for CCM mode encrypt, or
* length of the ciphertext for CCM mode decrypt.
* ccm_processed_data_len:
* Length of processed plaintext in CCM mode encrypt,
* or length of processed ciphertext for CCM mode decrypt.
* ccm_processed_mac_len:
* Length of MAC data accumulated in CCM mode decrypt.
*
* ccm_pt_buf: Only used in CCM mode decrypt. It stores the
* decrypted plaintext to be returned when
* MAC verification succeeds in decrypt_final.
* Memory for this should be allocated in the AES module.
*
*/
/* CCM mode context; see the block comment above for field semantics. */
typedef struct ccm_ctx {
struct common_ctx ccm_common;
uint32_t ccm_tmp[4]; /* scratch block — not covered by the doc above; confirm use */
size_t ccm_mac_len; /* length of the MAC */
uint64_t ccm_mac_buf[2]; /* intermediate MAC (encrypt) / input MAC (decrypt) */
size_t ccm_data_len; /* plaintext (encrypt) or ciphertext (decrypt) length */
size_t ccm_processed_data_len; /* bytes of payload processed so far */
size_t ccm_processed_mac_len; /* MAC bytes accumulated during decrypt */
uint8_t *ccm_pt_buf; /* decrypt only: plaintext held until MAC verifies */
uint64_t ccm_mac_input_buf[2]; /* NOTE(review): presumably stages incoming MAC bytes — confirm */
uint64_t ccm_counter_mask; /* NOTE(review): presumably masks the counter field width — confirm */
} ccm_ctx_t;
/* Shorthands for the embedded common context; ccm_cb aliases cc_iv. */
#define ccm_keysched ccm_common.cc_keysched
#define ccm_keysched_len ccm_common.cc_keysched_len
#define ccm_cb ccm_common.cc_iv
#define ccm_remainder ccm_common.cc_remainder
#define ccm_remainder_len ccm_common.cc_remainder_len
#define ccm_lastp ccm_common.cc_lastp
#define ccm_copy_to ccm_common.cc_copy_to
#define ccm_flags ccm_common.cc_flags
/*
* gcm_tag_len: Length of authentication tag.
*
* gcm_ghash: Stores output from the GHASH function.
*
* gcm_processed_data_len:
* Length of processed plaintext (encrypt) or
* length of processed ciphertext (decrypt).
*
* gcm_pt_buf: Stores the decrypted plaintext returned by
* decrypt_final when the computed authentication
* tag matches the user supplied tag.
*
* gcm_pt_buf_len: Length of the plaintext buffer.
*
* gcm_H: Subkey.
*
* gcm_J0: Pre-counter block generated from the IV.
*
* gcm_len_a_len_c: 64-bit representations of the bit lengths of
* AAD and ciphertext.
*
* gcm_kmflag: Current value of kmflag. Used only for allocating
* the plaintext buffer during decryption.
*/
/* GCM/GMAC mode context; see the block comment above for field semantics. */
typedef struct gcm_ctx {
struct common_ctx gcm_common;
size_t gcm_tag_len; /* length of authentication tag */
size_t gcm_processed_data_len; /* processed plaintext/ciphertext length */
size_t gcm_pt_buf_len; /* length of the plaintext buffer */
uint32_t gcm_tmp[4]; /* scratch block — not covered by the doc above; confirm use */
uint64_t gcm_ghash[2]; /* output of the GHASH function */
uint64_t gcm_H[2]; /* subkey */
uint64_t gcm_J0[2]; /* pre-counter block generated from the IV */
uint64_t gcm_len_a_len_c[2]; /* bit lengths of AAD and ciphertext */
uint8_t *gcm_pt_buf; /* plaintext returned once the tag verifies */
int gcm_kmflag; /* kmflag used to allocate gcm_pt_buf on decrypt */
} gcm_ctx_t;
/* Shorthands for the embedded common context; gcm_cb aliases cc_iv. */
#define gcm_keysched gcm_common.cc_keysched
#define gcm_keysched_len gcm_common.cc_keysched_len
#define gcm_cb gcm_common.cc_iv
#define gcm_remainder gcm_common.cc_remainder
#define gcm_remainder_len gcm_common.cc_remainder_len
#define gcm_lastp gcm_common.cc_lastp
#define gcm_copy_to gcm_common.cc_copy_to
#define gcm_flags gcm_common.cc_flags
#define AES_GMAC_IV_LEN 12
#define AES_GMAC_TAG_BITS 128
/*
 * Per-request AES context: one union member per supported mode.  The
 * ac_* shorthands below reach through the union; this is well-defined for
 * the common fields because every member embeds struct common_ctx as its
 * first member (common initial sequence).  The CCM/GCM-specific
 * shorthands are only meaningful when that mode's member is active.
 */
typedef struct aes_ctx {
union {
ecb_ctx_t acu_ecb;
cbc_ctx_t acu_cbc;
ctr_ctx_t acu_ctr;
ccm_ctx_t acu_ccm;
gcm_ctx_t acu_gcm;
} acu;
} aes_ctx_t;
#define ac_flags acu.acu_ecb.ecb_common.cc_flags
#define ac_remainder_len acu.acu_ecb.ecb_common.cc_remainder_len
#define ac_keysched acu.acu_ecb.ecb_common.cc_keysched
#define ac_keysched_len acu.acu_ecb.ecb_common.cc_keysched_len
#define ac_iv acu.acu_ecb.ecb_common.cc_iv
#define ac_lastp acu.acu_ecb.ecb_common.cc_lastp
#define ac_pt_buf acu.acu_ccm.ccm_pt_buf
#define ac_mac_len acu.acu_ccm.ccm_mac_len
#define ac_data_len acu.acu_ccm.ccm_data_len
#define ac_processed_mac_len acu.acu_ccm.ccm_processed_mac_len
#define ac_processed_data_len acu.acu_ccm.ccm_processed_data_len
#define ac_tag_len acu.acu_gcm.gcm_tag_len
/* Per-request Blowfish context: ECB or CBC mode only. */
typedef struct blowfish_ctx {
union {
ecb_ctx_t bcu_ecb;
cbc_ctx_t bcu_cbc;
} bcu;
} blowfish_ctx_t;
/* Shorthands for the common fields (both members embed common_ctx first). */
#define bc_flags bcu.bcu_ecb.ecb_common.cc_flags
#define bc_remainder_len bcu.bcu_ecb.ecb_common.cc_remainder_len
#define bc_keysched bcu.bcu_ecb.ecb_common.cc_keysched
#define bc_keysched_len bcu.bcu_ecb.ecb_common.cc_keysched_len
#define bc_iv bcu.bcu_ecb.ecb_common.cc_iv
#define bc_lastp bcu.bcu_ecb.ecb_common.cc_lastp
/* Per-request DES context: ECB or CBC mode only. */
typedef struct des_ctx {
union {
ecb_ctx_t dcu_ecb;
cbc_ctx_t dcu_cbc;
} dcu;
} des_ctx_t;
/* Shorthands for the common fields (both members embed common_ctx first). */
#define dc_flags dcu.dcu_ecb.ecb_common.cc_flags
#define dc_remainder_len dcu.dcu_ecb.ecb_common.cc_remainder_len
#define dc_keysched dcu.dcu_ecb.ecb_common.cc_keysched
#define dc_keysched_len dcu.dcu_ecb.ecb_common.cc_keysched_len
#define dc_iv dcu.dcu_ecb.ecb_common.cc_iv
#define dc_lastp dcu.dcu_ecb.ecb_common.cc_lastp
extern int ecb_cipher_contiguous_blocks(ecb_ctx_t *, char *, size_t,
crypto_data_t *, size_t, int (*cipher)(const void *, const uint8_t *,
uint8_t *));
extern int cbc_encrypt_contiguous_blocks(cbc_ctx_t *, char *, size_t,
crypto_data_t *, size_t,
int (*encrypt)(const void *, const uint8_t *, uint8_t *),
void (*copy_block)(uint8_t *, uint8_t *),
void (*xor_block)(uint8_t *, uint8_t *));
extern int cbc_decrypt_contiguous_blocks(cbc_ctx_t *, char *, size_t,
crypto_data_t *, size_t,
int (*decrypt)(const void *, const uint8_t *, uint8_t *),
void (*copy_block)(uint8_t *, uint8_t *),
void (*xor_block)(uint8_t *, uint8_t *));
extern int ctr_mode_contiguous_blocks(ctr_ctx_t *, char *, size_t,
crypto_data_t *, size_t,
int (*cipher)(const void *, const uint8_t *, uint8_t *),
void (*xor_block)(uint8_t *, uint8_t *));
extern int ccm_mode_encrypt_contiguous_blocks(ccm_ctx_t *, char *, size_t,
crypto_data_t *, size_t,
int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
void (*copy_block)(uint8_t *, uint8_t *),
void (*xor_block)(uint8_t *, uint8_t *));
extern int ccm_mode_decrypt_contiguous_blocks(ccm_ctx_t *, char *, size_t,
crypto_data_t *, size_t,
int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
void (*copy_block)(uint8_t *, uint8_t *),
void (*xor_block)(uint8_t *, uint8_t *));
extern int gcm_mode_encrypt_contiguous_blocks(gcm_ctx_t *, char *, size_t,
crypto_data_t *, size_t,
int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
void (*copy_block)(uint8_t *, uint8_t *),
void (*xor_block)(uint8_t *, uint8_t *));
extern int gcm_mode_decrypt_contiguous_blocks(gcm_ctx_t *, char *, size_t,
crypto_data_t *, size_t,
int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
void (*copy_block)(uint8_t *, uint8_t *),
void (*xor_block)(uint8_t *, uint8_t *));
int ccm_encrypt_final(ccm_ctx_t *, crypto_data_t *, size_t,
int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
void (*xor_block)(uint8_t *, uint8_t *));
int gcm_encrypt_final(gcm_ctx_t *, crypto_data_t *, size_t,
int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
void (*copy_block)(uint8_t *, uint8_t *),
void (*xor_block)(uint8_t *, uint8_t *));
extern int ccm_decrypt_final(ccm_ctx_t *, crypto_data_t *, size_t,
int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
void (*copy_block)(uint8_t *, uint8_t *),
void (*xor_block)(uint8_t *, uint8_t *));
extern int gcm_decrypt_final(gcm_ctx_t *, crypto_data_t *, size_t,
int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
void (*xor_block)(uint8_t *, uint8_t *));
extern int ctr_mode_final(ctr_ctx_t *, crypto_data_t *,
int (*encrypt_block)(const void *, const uint8_t *, uint8_t *));
extern int cbc_init_ctx(cbc_ctx_t *, char *, size_t, size_t,
void (*copy_block)(uint8_t *, uint64_t *));
extern int ctr_init_ctx(ctr_ctx_t *, ulong_t, uint8_t *,
void (*copy_block)(uint8_t *, uint8_t *));
extern int ccm_init_ctx(ccm_ctx_t *, char *, int, boolean_t, size_t,
int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
void (*xor_block)(uint8_t *, uint8_t *));
extern int gcm_init_ctx(gcm_ctx_t *, char *, size_t,
int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
void (*copy_block)(uint8_t *, uint8_t *),
void (*xor_block)(uint8_t *, uint8_t *));
extern int gmac_init_ctx(gcm_ctx_t *, char *, size_t,
int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
void (*copy_block)(uint8_t *, uint8_t *),
void (*xor_block)(uint8_t *, uint8_t *));
extern void calculate_ccm_mac(ccm_ctx_t *, uint8_t *,
int (*encrypt_block)(const void *, const uint8_t *, uint8_t *));
extern void gcm_mul(uint64_t *, uint64_t *, uint64_t *);
extern void crypto_init_ptrs(crypto_data_t *, void **, offset_t *);
extern void crypto_get_ptrs(crypto_data_t *, void **, offset_t *,
uint8_t **, size_t *, uint8_t **, size_t);
extern void *ecb_alloc_ctx(int);
extern void *cbc_alloc_ctx(int);
extern void *ctr_alloc_ctx(int);
extern void *ccm_alloc_ctx(int);
extern void *gcm_alloc_ctx(int);
extern void *gmac_alloc_ctx(int);
extern void crypto_free_mode_ctx(void *);
extern void gcm_set_kmflag(gcm_ctx_t *, int);
#ifdef __cplusplus
}
#endif
#endif /* _COMMON_CRYPTO_MODES_H */

View File

@ -0,0 +1,61 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _SYS_SHA1_H
#define _SYS_SHA1_H
#include <sys/types.h> /* for uint_* */
#ifdef __cplusplus
extern "C" {
#endif
/*
* NOTE: n2rng (Niagara2 RNG driver) accesses the state field of
* SHA1_CTX directly. NEVER change this structure without verifying
* compatibility with n2rng. The important thing is that the state
* must be in a field declared as uint32_t state[5].
*/
/* SHA-1 context. */
/*
 * Layout is frozen: per the note above, n2rng reads state[] directly,
 * so state must remain a uint32_t state[5] field.
 */
typedef struct {
uint32_t state[5]; /* state (ABCDE) */
uint32_t count[2]; /* number of bits, modulo 2^64 (msb first) */
union {
uint8_t buf8[64]; /* undigested input */
uint32_t buf32[16]; /* realigned input */
} buf_un;
} SHA1_CTX;
#define SHA1_DIGEST_LENGTH 20
void SHA1Init(SHA1_CTX *);
void SHA1Update(SHA1_CTX *, const void *, size_t);
void SHA1Final(void *, SHA1_CTX *);
#ifdef __cplusplus
}
#endif
#endif /* _SYS_SHA1_H */

View File

@ -0,0 +1,65 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 1998, by Sun Microsystems, Inc.
* All rights reserved.
*/
#ifndef _SYS_SHA1_CONSTS_H
#define _SYS_SHA1_CONSTS_H
#ifdef __cplusplus
extern "C" {
#endif
/*
* as explained in sha1.c, loading 32-bit constants on a sparc is expensive
* since it involves both a `sethi' and an `or'. thus, we instead use `ld'
* to load the constants from an array called `sha1_consts'. however, on
* intel (and perhaps other processors), it is cheaper to load the constant
* directly. thus, the c code in SHA1Transform() uses the macro SHA1_CONST()
* which either expands to a constant or an array reference, depending on
* the architecture the code is being compiled for.
*/
#include <sys/types.h> /* uint32_t */
extern const uint32_t sha1_consts[];
#if defined(__sparc)
#define SHA1_CONST(x) (sha1_consts[x])
#else
#define SHA1_CONST(x) (SHA1_CONST_ ## x)
#endif
/* constants, as provided in FIPS 180-1 */
#define SHA1_CONST_0 0x5a827999U
#define SHA1_CONST_1 0x6ed9eba1U
#define SHA1_CONST_2 0x8f1bbcdcU
#define SHA1_CONST_3 0xca62c1d6U
#ifdef __cplusplus
}
#endif
#endif /* _SYS_SHA1_CONSTS_H */

View File

@ -0,0 +1,73 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _SHA1_IMPL_H
#define _SHA1_IMPL_H
#ifdef __cplusplus
extern "C" {
#endif
#define SHA1_HASH_SIZE 20 /* SHA_1 digest length in bytes */
#define SHA1_DIGEST_LENGTH 20 /* SHA1 digest length in bytes */
#define SHA1_HMAC_BLOCK_SIZE 64 /* SHA1-HMAC block size */
#define SHA1_HMAC_MIN_KEY_LEN 1 /* SHA1-HMAC min key length in bytes */
#define SHA1_HMAC_MAX_KEY_LEN INT_MAX /* SHA1-HMAC max key length in bytes */
#define SHA1_HMAC_INTS_PER_BLOCK (SHA1_HMAC_BLOCK_SIZE/sizeof (uint32_t))
/*
* CSPI information (entry points, provider info, etc.)
*/
/* Mechanism identifiers for the SHA1 provider. */
typedef enum sha1_mech_type {
SHA1_MECH_INFO_TYPE, /* SUN_CKM_SHA1 */
SHA1_HMAC_MECH_INFO_TYPE, /* SUN_CKM_SHA1_HMAC */
SHA1_HMAC_GEN_MECH_INFO_TYPE /* SUN_CKM_SHA1_HMAC_GENERAL */
} sha1_mech_type_t;
/*
* Context for SHA1 mechanism.
*/
/* Plain-digest context: mechanism tag plus the underlying SHA1 state. */
typedef struct sha1_ctx {
sha1_mech_type_t sc_mech_type; /* type of context */
SHA1_CTX sc_sha1_ctx; /* SHA1 context */
} sha1_ctx_t;
/*
* Context for SHA1-HMAC and SHA1-HMAC-GENERAL mechanisms.
*/
/* HMAC context: inner and outer SHA1 states for the ipad/opad passes. */
typedef struct sha1_hmac_ctx {
sha1_mech_type_t hc_mech_type; /* type of context */
uint32_t hc_digest_len; /* digest len in bytes */
SHA1_CTX hc_icontext; /* inner SHA1 context */
SHA1_CTX hc_ocontext; /* outer SHA1 context */
} sha1_hmac_ctx_t;
#ifdef __cplusplus
}
#endif
#endif /* _SHA1_IMPL_H */

View File

@ -0,0 +1,116 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/* Copyright 2013 Saso Kiselkov. All rights reserved. */
#ifndef _SYS_SHA2_H
#define _SYS_SHA2_H
#include <sys/types.h> /* for uint_* */
#ifdef __cplusplus
extern "C" {
#endif
#define SHA2_HMAC_MIN_KEY_LEN 1 /* SHA2-HMAC min key length in bytes */
#define SHA2_HMAC_MAX_KEY_LEN INT_MAX /* SHA2-HMAC max key length in bytes */
#define SHA256_DIGEST_LENGTH 32 /* SHA256 digest length in bytes */
#define SHA256_HMAC_BLOCK_SIZE 64 /* SHA256-HMAC block size */
#define SHA256 0
#define SHA256_HMAC 1
#define SHA256_HMAC_GEN 2
/*
* SHA2 context.
* The contents of this structure are a private interface between the
* Init/Update/Final calls of the functions defined below.
* Callers must never attempt to read or write any of the fields
* in this structure directly.
*/
/*
 * Opaque per the comment above; callers must not touch fields directly.
 * algotype presumably selects which union members (32- vs 64-bit) are
 * active — confirm against the implementation.
 */
typedef struct {
uint32_t algotype; /* Algorithm Type */
/* state (ABCDEFGH) */
union {
uint32_t s32[8]; /* for SHA256 */
uint64_t s64[8]; /* for SHA384/512 */
} state;
/* number of bits */
union {
uint32_t c32[2]; /* for SHA256 , modulo 2^64 */
uint64_t c64[2]; /* for SHA384/512, modulo 2^128 */
} count;
union {
uint8_t buf8[128]; /* undigested input */
uint32_t buf32[32]; /* realigned input */
uint64_t buf64[16]; /* realigned input */
} buf_un;
} SHA2_CTX;
typedef SHA2_CTX SHA256_CTX;
typedef SHA2_CTX SHA384_CTX;
typedef SHA2_CTX SHA512_CTX;
extern void SHA2Init(uint64_t mech, SHA2_CTX *);
extern void SHA2Update(SHA2_CTX *, const void *, size_t);
extern void SHA2Final(void *, SHA2_CTX *);
extern void SHA256Init(SHA256_CTX *);
extern void SHA256Update(SHA256_CTX *, const void *, size_t);
extern void SHA256Final(void *, SHA256_CTX *);
#ifdef _SHA2_IMPL
/*
* The following types/functions are all private to the implementation
* of the SHA2 functions and must not be used by consumers of the interface
*/
/*
* List of supported mechanisms in this module.
*
* It is important to note that in the module, division or modulus calculations
* are used on the enumerated type to determine which mechanism is being used;
* therefore, changing the order or adding mechanisms should be done
* carefully
*/
/*
 * Enumerator order matters: per the comment above, the module applies
 * division/modulus to these values to identify the mechanism.
 */
typedef enum sha2_mech_type {
SHA256_MECH_INFO_TYPE, /* SUN_CKM_SHA256 */
SHA256_HMAC_MECH_INFO_TYPE, /* SUN_CKM_SHA256_HMAC */
SHA256_HMAC_GEN_MECH_INFO_TYPE, /* SUN_CKM_SHA256_HMAC_GENERAL */
} sha2_mech_type_t;
#endif /* _SHA2_IMPL */
#ifdef __cplusplus
}
#endif
#endif /* _SYS_SHA2_H */

View File

@ -0,0 +1,219 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _SYS_SHA2_CONSTS_H
#define _SYS_SHA2_CONSTS_H
#ifdef __cplusplus
extern "C" {
#endif
/*
* Loading 32-bit constants on a sparc is expensive since it involves both
* a `sethi' and an `or'. thus, we instead use `ld' to load the constants
* from an array called `sha2_consts'. however, on intel (and perhaps other
* processors), it is cheaper to load the constant directly. thus, the c
* code in SHA transform functions uses the macro SHA2_CONST() which either
* expands to a constant or an array reference, depending on
* the architecture the code is being compiled for.
*
* SHA512 constants are used for SHA384
*/
#include <sys/types.h> /* uint32_t */
extern const uint32_t sha256_consts[];
extern const uint64_t sha512_consts[];
#if defined(__sparc)
#define SHA256_CONST(x) (sha256_consts[x])
#define SHA512_CONST(x) (sha512_consts[x])
#else
#define SHA256_CONST(x) (SHA256_CONST_ ## x)
#define SHA512_CONST(x) (SHA512_CONST_ ## x)
#endif
/* constants, as provided in FIPS 180-2 */
#define SHA256_CONST_0 0x428a2f98U
#define SHA256_CONST_1 0x71374491U
#define SHA256_CONST_2 0xb5c0fbcfU
#define SHA256_CONST_3 0xe9b5dba5U
#define SHA256_CONST_4 0x3956c25bU
#define SHA256_CONST_5 0x59f111f1U
#define SHA256_CONST_6 0x923f82a4U
#define SHA256_CONST_7 0xab1c5ed5U
#define SHA256_CONST_8 0xd807aa98U
#define SHA256_CONST_9 0x12835b01U
#define SHA256_CONST_10 0x243185beU
#define SHA256_CONST_11 0x550c7dc3U
#define SHA256_CONST_12 0x72be5d74U
#define SHA256_CONST_13 0x80deb1feU
#define SHA256_CONST_14 0x9bdc06a7U
#define SHA256_CONST_15 0xc19bf174U
#define SHA256_CONST_16 0xe49b69c1U
#define SHA256_CONST_17 0xefbe4786U
#define SHA256_CONST_18 0x0fc19dc6U
#define SHA256_CONST_19 0x240ca1ccU
#define SHA256_CONST_20 0x2de92c6fU
#define SHA256_CONST_21 0x4a7484aaU
#define SHA256_CONST_22 0x5cb0a9dcU
#define SHA256_CONST_23 0x76f988daU
#define SHA256_CONST_24 0x983e5152U
#define SHA256_CONST_25 0xa831c66dU
#define SHA256_CONST_26 0xb00327c8U
#define SHA256_CONST_27 0xbf597fc7U
#define SHA256_CONST_28 0xc6e00bf3U
#define SHA256_CONST_29 0xd5a79147U
#define SHA256_CONST_30 0x06ca6351U
#define SHA256_CONST_31 0x14292967U
#define SHA256_CONST_32 0x27b70a85U
#define SHA256_CONST_33 0x2e1b2138U
#define SHA256_CONST_34 0x4d2c6dfcU
#define SHA256_CONST_35 0x53380d13U
#define SHA256_CONST_36 0x650a7354U
#define SHA256_CONST_37 0x766a0abbU
#define SHA256_CONST_38 0x81c2c92eU
#define SHA256_CONST_39 0x92722c85U
#define SHA256_CONST_40 0xa2bfe8a1U
#define SHA256_CONST_41 0xa81a664bU
#define SHA256_CONST_42 0xc24b8b70U
#define SHA256_CONST_43 0xc76c51a3U
#define SHA256_CONST_44 0xd192e819U
#define SHA256_CONST_45 0xd6990624U
#define SHA256_CONST_46 0xf40e3585U
#define SHA256_CONST_47 0x106aa070U
#define SHA256_CONST_48 0x19a4c116U
#define SHA256_CONST_49 0x1e376c08U
#define SHA256_CONST_50 0x2748774cU
#define SHA256_CONST_51 0x34b0bcb5U
#define SHA256_CONST_52 0x391c0cb3U
#define SHA256_CONST_53 0x4ed8aa4aU
#define SHA256_CONST_54 0x5b9cca4fU
#define SHA256_CONST_55 0x682e6ff3U
#define SHA256_CONST_56 0x748f82eeU
#define SHA256_CONST_57 0x78a5636fU
#define SHA256_CONST_58 0x84c87814U
#define SHA256_CONST_59 0x8cc70208U
#define SHA256_CONST_60 0x90befffaU
#define SHA256_CONST_61 0xa4506cebU
#define SHA256_CONST_62 0xbef9a3f7U
#define SHA256_CONST_63 0xc67178f2U
#define SHA512_CONST_0 0x428a2f98d728ae22ULL
#define SHA512_CONST_1 0x7137449123ef65cdULL
#define SHA512_CONST_2 0xb5c0fbcfec4d3b2fULL
#define SHA512_CONST_3 0xe9b5dba58189dbbcULL
#define SHA512_CONST_4 0x3956c25bf348b538ULL
#define SHA512_CONST_5 0x59f111f1b605d019ULL
#define SHA512_CONST_6 0x923f82a4af194f9bULL
#define SHA512_CONST_7 0xab1c5ed5da6d8118ULL
#define SHA512_CONST_8 0xd807aa98a3030242ULL
#define SHA512_CONST_9 0x12835b0145706fbeULL
#define SHA512_CONST_10 0x243185be4ee4b28cULL
#define SHA512_CONST_11 0x550c7dc3d5ffb4e2ULL
#define SHA512_CONST_12 0x72be5d74f27b896fULL
#define SHA512_CONST_13 0x80deb1fe3b1696b1ULL
#define SHA512_CONST_14 0x9bdc06a725c71235ULL
#define SHA512_CONST_15 0xc19bf174cf692694ULL
#define SHA512_CONST_16 0xe49b69c19ef14ad2ULL
#define SHA512_CONST_17 0xefbe4786384f25e3ULL
#define SHA512_CONST_18 0x0fc19dc68b8cd5b5ULL
#define SHA512_CONST_19 0x240ca1cc77ac9c65ULL
#define SHA512_CONST_20 0x2de92c6f592b0275ULL
#define SHA512_CONST_21 0x4a7484aa6ea6e483ULL
#define SHA512_CONST_22 0x5cb0a9dcbd41fbd4ULL
#define SHA512_CONST_23 0x76f988da831153b5ULL
#define SHA512_CONST_24 0x983e5152ee66dfabULL
#define SHA512_CONST_25 0xa831c66d2db43210ULL
#define SHA512_CONST_26 0xb00327c898fb213fULL
#define SHA512_CONST_27 0xbf597fc7beef0ee4ULL
#define SHA512_CONST_28 0xc6e00bf33da88fc2ULL
#define SHA512_CONST_29 0xd5a79147930aa725ULL
#define SHA512_CONST_30 0x06ca6351e003826fULL
#define SHA512_CONST_31 0x142929670a0e6e70ULL
#define SHA512_CONST_32 0x27b70a8546d22ffcULL
#define SHA512_CONST_33 0x2e1b21385c26c926ULL
#define SHA512_CONST_34 0x4d2c6dfc5ac42aedULL
#define SHA512_CONST_35 0x53380d139d95b3dfULL
#define SHA512_CONST_36 0x650a73548baf63deULL
#define SHA512_CONST_37 0x766a0abb3c77b2a8ULL
#define SHA512_CONST_38 0x81c2c92e47edaee6ULL
#define SHA512_CONST_39 0x92722c851482353bULL
#define SHA512_CONST_40 0xa2bfe8a14cf10364ULL
#define SHA512_CONST_41 0xa81a664bbc423001ULL
#define SHA512_CONST_42 0xc24b8b70d0f89791ULL
#define SHA512_CONST_43 0xc76c51a30654be30ULL
#define SHA512_CONST_44 0xd192e819d6ef5218ULL
#define SHA512_CONST_45 0xd69906245565a910ULL
#define SHA512_CONST_46 0xf40e35855771202aULL
#define SHA512_CONST_47 0x106aa07032bbd1b8ULL
#define SHA512_CONST_48 0x19a4c116b8d2d0c8ULL
#define SHA512_CONST_49 0x1e376c085141ab53ULL
#define SHA512_CONST_50 0x2748774cdf8eeb99ULL
#define SHA512_CONST_51 0x34b0bcb5e19b48a8ULL
#define SHA512_CONST_52 0x391c0cb3c5c95a63ULL
#define SHA512_CONST_53 0x4ed8aa4ae3418acbULL
#define SHA512_CONST_54 0x5b9cca4f7763e373ULL
#define SHA512_CONST_55 0x682e6ff3d6b2b8a3ULL
#define SHA512_CONST_56 0x748f82ee5defb2fcULL
#define SHA512_CONST_57 0x78a5636f43172f60ULL
#define SHA512_CONST_58 0x84c87814a1f0ab72ULL
#define SHA512_CONST_59 0x8cc702081a6439ecULL
#define SHA512_CONST_60 0x90befffa23631e28ULL
#define SHA512_CONST_61 0xa4506cebde82bde9ULL
#define SHA512_CONST_62 0xbef9a3f7b2c67915ULL
#define SHA512_CONST_63 0xc67178f2e372532bULL
#define SHA512_CONST_64 0xca273eceea26619cULL
#define SHA512_CONST_65 0xd186b8c721c0c207ULL
#define SHA512_CONST_66 0xeada7dd6cde0eb1eULL
#define SHA512_CONST_67 0xf57d4f7fee6ed178ULL
#define SHA512_CONST_68 0x06f067aa72176fbaULL
#define SHA512_CONST_69 0x0a637dc5a2c898a6ULL
#define SHA512_CONST_70 0x113f9804bef90daeULL
#define SHA512_CONST_71 0x1b710b35131c471bULL
#define SHA512_CONST_72 0x28db77f523047d84ULL
#define SHA512_CONST_73 0x32caab7b40c72493ULL
#define SHA512_CONST_74 0x3c9ebe0a15c9bebcULL
#define SHA512_CONST_75 0x431d67c49c100d4cULL
#define SHA512_CONST_76 0x4cc5d4becb3e42b6ULL
#define SHA512_CONST_77 0x597f299cfc657e2aULL
#define SHA512_CONST_78 0x5fcb6fab3ad6faecULL
#define SHA512_CONST_79 0x6c44198c4a475817ULL
#ifdef __cplusplus
}
#endif
#endif /* _SYS_SHA2_CONSTS_H */

View File

@ -0,0 +1,62 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _SHA2_IMPL_H
#define _SHA2_IMPL_H
#ifdef __cplusplus
extern "C" {
#endif
/* Hash-family selector used internally by the SHA2 provider. */
typedef enum {
SHA1_TYPE,
SHA256_TYPE,
SHA384_TYPE,
SHA512_TYPE
} sha2_mech_t;
/*
* Context for SHA2 mechanism.
*/
/* Plain-digest context: mechanism tag plus the underlying SHA2 state. */
typedef struct sha2_ctx {
sha2_mech_type_t sc_mech_type; /* type of context */
SHA2_CTX sc_sha2_ctx; /* SHA2 context */
} sha2_ctx_t;
/*
* Context for SHA2 HMAC and HMAC GENERAL mechanisms.
*/
/* HMAC context: inner and outer SHA2 states for the ipad/opad passes. */
typedef struct sha2_hmac_ctx {
sha2_mech_type_t hc_mech_type; /* type of context */
uint32_t hc_digest_len; /* digest len in bytes */
SHA2_CTX hc_icontext; /* inner SHA2 context */
SHA2_CTX hc_ocontext; /* outer SHA2 context */
} sha2_hmac_ctx_t;
#ifdef __cplusplus
}
#endif
#endif /* _SHA2_IMPL_H */

View File

@ -0,0 +1,36 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _SYS_ASM_LINKAGE_H
#define _SYS_ASM_LINKAGE_H
#if defined(__i386) || defined(__amd64)
#include <sys/ia32/asm_linkage.h> /* XX64 x86/sys/asm_linkage.h */
#endif
#endif /* _SYS_ASM_LINKAGE_H */

View File

@ -0,0 +1,183 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
/* All Rights Reserved */
#ifndef _SYS_BITMAP_H
#define _SYS_BITMAP_H
#ifdef __cplusplus
extern "C" {
#endif
#if defined(__GNUC__) && defined(_ASM_INLINES) && \
(defined(__i386) || defined(__amd64))
#include <asm/bitmap.h>
#endif
/*
* Operations on bitmaps of arbitrary size
* A bitmap is a vector of 1 or more ulong_t's.
* The user of the package is responsible for range checks and keeping
* track of sizes.
*/
#ifdef _LP64
#define BT_ULSHIFT 6 /* log base 2 of BT_NBIPUL, to extract word index */
#define BT_ULSHIFT32 5 /* log base 2 of BT_NBIPUL, to extract word index */
#else
#define BT_ULSHIFT 5 /* log base 2 of BT_NBIPUL, to extract word index */
#endif
#define BT_NBIPUL (1 << BT_ULSHIFT) /* n bits per ulong_t */
#define BT_ULMASK (BT_NBIPUL - 1) /* to extract bit index */
#ifdef _LP64
#define BT_NBIPUL32 (1 << BT_ULSHIFT32) /* n bits per ulong_t */
#define BT_ULMASK32 (BT_NBIPUL32 - 1) /* to extract bit index */
#define BT_ULMAXMASK 0xffffffffffffffff /* used by bt_getlowbit */
#else
#define BT_ULMAXMASK 0xffffffff
#endif
/*
* bitmap is a ulong_t *, bitindex an index_t
*
* The macros BT_WIM and BT_BIW internal; there is no need
* for users of this package to use them.
*/
/*
* word in map
*/
#define BT_WIM(bitmap, bitindex) \
((bitmap)[(bitindex) >> BT_ULSHIFT])
/*
* bit in word
*/
#define BT_BIW(bitindex) \
(1UL << ((bitindex) & BT_ULMASK))
#ifdef _LP64
#define BT_WIM32(bitmap, bitindex) \
((bitmap)[(bitindex) >> BT_ULSHIFT32])
#define BT_BIW32(bitindex) \
(1UL << ((bitindex) & BT_ULMASK32))
#endif
/*
* These are public macros
*
* BT_BITOUL == n bits to n ulong_t's
*/
#define BT_BITOUL(nbits) \
(((nbits) + BT_NBIPUL - 1l) / BT_NBIPUL)
#define BT_SIZEOFMAP(nbits) \
(BT_BITOUL(nbits) * sizeof (ulong_t))
#define BT_TEST(bitmap, bitindex) \
((BT_WIM((bitmap), (bitindex)) & BT_BIW(bitindex)) ? 1 : 0)
#define BT_SET(bitmap, bitindex) \
{ BT_WIM((bitmap), (bitindex)) |= BT_BIW(bitindex); }
#define BT_CLEAR(bitmap, bitindex) \
{ BT_WIM((bitmap), (bitindex)) &= ~BT_BIW(bitindex); }
#ifdef _LP64
#define BT_BITOUL32(nbits) \
(((nbits) + BT_NBIPUL32 - 1l) / BT_NBIPUL32)
#define BT_SIZEOFMAP32(nbits) \
(BT_BITOUL32(nbits) * sizeof (uint_t))
#define BT_TEST32(bitmap, bitindex) \
((BT_WIM32((bitmap), (bitindex)) & BT_BIW32(bitindex)) ? 1 : 0)
#define BT_SET32(bitmap, bitindex) \
{ BT_WIM32((bitmap), (bitindex)) |= BT_BIW32(bitindex); }
#define BT_CLEAR32(bitmap, bitindex) \
{ BT_WIM32((bitmap), (bitindex)) &= ~BT_BIW32(bitindex); }
#endif /* _LP64 */
/*
* BIT_ONLYONESET is a private macro not designed for bitmaps of
* arbitrary size. u must be an unsigned integer/long. It returns
* true if one and only one bit is set in u.
*/
#define BIT_ONLYONESET(u) \
((((u) == 0) ? 0 : ((u) & ((u) - 1)) == 0))
#ifndef _ASM
/*
* return next available bit index from map with specified number of bits
*/
extern index_t bt_availbit(ulong_t *bitmap, size_t nbits);
/*
* find the highest order bit that is on, and is within or below
* the word specified by wx
*/
extern int bt_gethighbit(ulong_t *mapp, int wx);
extern int bt_range(ulong_t *bitmap, size_t *pos1, size_t *pos2,
size_t end_pos);
extern int bt_getlowbit(ulong_t *bitmap, size_t start, size_t stop);
extern void bt_copy(ulong_t *, ulong_t *, ulong_t);
/*
* find the parity
*/
extern int odd_parity(ulong_t);
/*
* Atomically set/clear bits
* Atomic exclusive operations will set "result" to "-1"
* if the bit is already set/cleared. "result" will be set
* to 0 otherwise.
*/
#define BT_ATOMIC_SET(bitmap, bitindex) \
{ atomic_or_long(&(BT_WIM(bitmap, bitindex)), BT_BIW(bitindex)); }
#define BT_ATOMIC_CLEAR(bitmap, bitindex) \
{ atomic_and_long(&(BT_WIM(bitmap, bitindex)), ~BT_BIW(bitindex)); }
#define BT_ATOMIC_SET_EXCL(bitmap, bitindex, result) \
{ result = atomic_set_long_excl(&(BT_WIM(bitmap, bitindex)), \
(bitindex) % BT_NBIPUL); }
#define BT_ATOMIC_CLEAR_EXCL(bitmap, bitindex, result) \
{ result = atomic_clear_long_excl(&(BT_WIM(bitmap, bitindex)), \
(bitindex) % BT_NBIPUL); }
/*
* Extracts bits between index h (high, inclusive) and l (low, exclusive) from
* u, which must be an unsigned integer.
*/
#define BITX(u, h, l) (((u) >> (l)) & ((1LU << ((h) - (l) + 1LU)) - 1LU))
#endif /* _ASM */
#ifdef __cplusplus
}
#endif
#endif /* _SYS_BITMAP_H */

View File

@ -0,0 +1,137 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _SYS_CRYPTO_ELFSIGN_H
#define _SYS_CRYPTO_ELFSIGN_H
#ifdef __cplusplus
extern "C" {
#endif
/*
* Consolidation Private Interface for elfsign/libpkcs11/kcfd
*/
#include <sys/zfs_context.h>
/*
* Project Private structures and types used for communication between kcfd
* and KCF over the door.
*/
typedef enum ELFsign_status_e {
ELFSIGN_UNKNOWN,
ELFSIGN_SUCCESS,
ELFSIGN_FAILED,
ELFSIGN_NOTSIGNED,
ELFSIGN_INVALID_CERTPATH,
ELFSIGN_INVALID_ELFOBJ,
ELFSIGN_RESTRICTED
} ELFsign_status_t;
#define KCF_KCFD_VERSION1 1
#define SIG_MAX_LENGTH 1024
#define ELF_SIGNATURE_SECTION ".SUNW_signature"
typedef struct kcf_door_arg_s {
short da_version;
boolean_t da_iskernel;
union {
char filename[MAXPATHLEN]; /* For request */
struct kcf_door_result_s { /* For response */
ELFsign_status_t status;
uint32_t siglen;
uchar_t signature[1];
} result;
} da_u;
} kcf_door_arg_t;
typedef uint32_t filesig_vers_t;
/*
* File Signature Structure
* Applicable to ELF and other file formats
*/
struct filesignatures {
uint32_t filesig_cnt; /* count of signatures */
uint32_t filesig_pad; /* unused */
union {
char filesig_data[1];
struct filesig { /* one of these for each signature */
uint32_t filesig_size;
filesig_vers_t filesig_version;
union {
struct filesig_version1 {
uint32_t filesig_v1_dnsize;
uint32_t filesig_v1_sigsize;
uint32_t filesig_v1_oidsize;
char filesig_v1_data[1];
} filesig_v1;
struct filesig_version3 {
uint64_t filesig_v3_time;
uint32_t filesig_v3_dnsize;
uint32_t filesig_v3_sigsize;
uint32_t filesig_v3_oidsize;
char filesig_v3_data[1];
} filesig_v3;
} _u2;
} filesig_sig;
uint64_t filesig_align;
} _u1;
};
#define filesig_sig _u1.filesig_sig
#define filesig_v1_dnsize _u2.filesig_v1.filesig_v1_dnsize
#define filesig_v1_sigsize _u2.filesig_v1.filesig_v1_sigsize
#define filesig_v1_oidsize _u2.filesig_v1.filesig_v1_oidsize
#define filesig_v1_data _u2.filesig_v1.filesig_v1_data
#define filesig_v3_time _u2.filesig_v3.filesig_v3_time
#define filesig_v3_dnsize _u2.filesig_v3.filesig_v3_dnsize
#define filesig_v3_sigsize _u2.filesig_v3.filesig_v3_sigsize
#define filesig_v3_oidsize _u2.filesig_v3.filesig_v3_oidsize
#define filesig_v3_data _u2.filesig_v3.filesig_v3_data
#define filesig_ALIGN(s) (((s) + sizeof (uint64_t) - 1) & \
(-sizeof (uint64_t)))
#define filesig_next(ptr) (struct filesig *)((void *)((char *)(ptr) + \
filesig_ALIGN((ptr)->filesig_size)))
#define FILESIG_UNKNOWN 0 /* unrecognized version */
#define FILESIG_VERSION1 1 /* version1, all but sig section */
#define FILESIG_VERSION2 2 /* version1 format, SHF_ALLOC only */
#define FILESIG_VERSION3 3 /* version3, all but sig section */
#define FILESIG_VERSION4 4 /* version3 format, SHF_ALLOC only */
#define _PATH_KCFD_DOOR "/etc/svc/volatile/kcfd_door"
#ifdef __cplusplus
}
#endif
#endif /* _SYS_CRYPTO_ELFSIGN_H */

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,136 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _SYS_CRYPTO_IOCTLADMIN_H
#define _SYS_CRYPTO_IOCTLADMIN_H
#ifdef __cplusplus
extern "C" {
#endif
#include <sys/zfs_context.h>
#include <sys/crypto/common.h>
#define ADMIN_IOCTL_DEVICE "/dev/cryptoadm"
#define CRYPTOADMIN(x) (('y' << 8) | (x))
/*
* Administrative IOCTLs
*/
typedef struct crypto_get_dev_list {
uint_t dl_return_value;
uint_t dl_dev_count;
crypto_dev_list_entry_t dl_devs[1];
} crypto_get_dev_list_t;
typedef struct crypto_get_soft_list {
uint_t sl_return_value;
uint_t sl_soft_count;
size_t sl_soft_len;
caddr_t sl_soft_names;
} crypto_get_soft_list_t;
typedef struct crypto_get_dev_info {
uint_t di_return_value;
char di_dev_name[MAXNAMELEN];
uint_t di_dev_instance;
uint_t di_count;
crypto_mech_name_t di_list[1];
} crypto_get_dev_info_t;
typedef struct crypto_get_soft_info {
uint_t si_return_value;
char si_name[MAXNAMELEN];
uint_t si_count;
crypto_mech_name_t si_list[1];
} crypto_get_soft_info_t;
typedef struct crypto_load_dev_disabled {
uint_t dd_return_value;
char dd_dev_name[MAXNAMELEN];
uint_t dd_dev_instance;
uint_t dd_count;
crypto_mech_name_t dd_list[1];
} crypto_load_dev_disabled_t;
typedef struct crypto_load_soft_disabled {
uint_t sd_return_value;
char sd_name[MAXNAMELEN];
uint_t sd_count;
crypto_mech_name_t sd_list[1];
} crypto_load_soft_disabled_t;
typedef struct crypto_unload_soft_module {
uint_t sm_return_value;
char sm_name[MAXNAMELEN];
} crypto_unload_soft_module_t;
typedef struct crypto_load_soft_config {
uint_t sc_return_value;
char sc_name[MAXNAMELEN];
uint_t sc_count;
crypto_mech_name_t sc_list[1];
} crypto_load_soft_config_t;
typedef struct crypto_load_door {
uint_t ld_return_value;
uint_t ld_did;
} crypto_load_door_t;
#ifdef _KERNEL
#ifdef _SYSCALL32
typedef struct crypto_get_soft_list32 {
uint32_t sl_return_value;
uint32_t sl_soft_count;
size32_t sl_soft_len;
caddr32_t sl_soft_names;
} crypto_get_soft_list32_t;
#endif /* _SYSCALL32 */
#endif /* _KERNEL */
#define CRYPTO_GET_VERSION CRYPTOADMIN(1)
#define CRYPTO_GET_DEV_LIST CRYPTOADMIN(2)
#define CRYPTO_GET_SOFT_LIST CRYPTOADMIN(3)
#define CRYPTO_GET_DEV_INFO CRYPTOADMIN(4)
#define CRYPTO_GET_SOFT_INFO CRYPTOADMIN(5)
#define CRYPTO_LOAD_DEV_DISABLED CRYPTOADMIN(8)
#define CRYPTO_LOAD_SOFT_DISABLED CRYPTOADMIN(9)
#define CRYPTO_UNLOAD_SOFT_MODULE CRYPTOADMIN(10)
#define CRYPTO_LOAD_SOFT_CONFIG CRYPTOADMIN(11)
#define CRYPTO_POOL_CREATE CRYPTOADMIN(12)
#define CRYPTO_POOL_WAIT CRYPTOADMIN(13)
#define CRYPTO_POOL_RUN CRYPTOADMIN(14)
#define CRYPTO_LOAD_DOOR CRYPTOADMIN(15)
#ifdef __cplusplus
}
#endif
#endif /* _SYS_CRYPTO_IOCTLADMIN_H */

View File

@ -0,0 +1,630 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _SYS_CRYPTO_OPS_IMPL_H
#define _SYS_CRYPTO_OPS_IMPL_H
/*
* Scheduler internal structures.
*/
#ifdef __cplusplus
extern "C" {
#endif
#include <sys/zfs_context.h>
#include <sys/crypto/api.h>
#include <sys/crypto/spi.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/common.h>
/*
* The parameters needed for each function group are batched
* in one structure. This is much simpler than having a
* separate structure for each function.
*
* In some cases, a field is generically named to keep the
* structure small. The comments indicate these cases.
*/
typedef struct kcf_digest_ops_params {
crypto_session_id_t do_sid;
crypto_mech_type_t do_framework_mechtype;
crypto_mechanism_t do_mech;
crypto_data_t *do_data;
crypto_data_t *do_digest;
crypto_key_t *do_digest_key; /* Argument for digest_key() */
} kcf_digest_ops_params_t;
typedef struct kcf_mac_ops_params {
crypto_session_id_t mo_sid;
crypto_mech_type_t mo_framework_mechtype;
crypto_mechanism_t mo_mech;
crypto_key_t *mo_key;
crypto_data_t *mo_data;
crypto_data_t *mo_mac;
crypto_spi_ctx_template_t mo_templ;
} kcf_mac_ops_params_t;
typedef struct kcf_encrypt_ops_params {
crypto_session_id_t eo_sid;
crypto_mech_type_t eo_framework_mechtype;
crypto_mechanism_t eo_mech;
crypto_key_t *eo_key;
crypto_data_t *eo_plaintext;
crypto_data_t *eo_ciphertext;
crypto_spi_ctx_template_t eo_templ;
} kcf_encrypt_ops_params_t;
typedef struct kcf_decrypt_ops_params {
crypto_session_id_t dop_sid;
crypto_mech_type_t dop_framework_mechtype;
crypto_mechanism_t dop_mech;
crypto_key_t *dop_key;
crypto_data_t *dop_ciphertext;
crypto_data_t *dop_plaintext;
crypto_spi_ctx_template_t dop_templ;
} kcf_decrypt_ops_params_t;
typedef struct kcf_sign_ops_params {
crypto_session_id_t so_sid;
crypto_mech_type_t so_framework_mechtype;
crypto_mechanism_t so_mech;
crypto_key_t *so_key;
crypto_data_t *so_data;
crypto_data_t *so_signature;
crypto_spi_ctx_template_t so_templ;
} kcf_sign_ops_params_t;
typedef struct kcf_verify_ops_params {
crypto_session_id_t vo_sid;
crypto_mech_type_t vo_framework_mechtype;
crypto_mechanism_t vo_mech;
crypto_key_t *vo_key;
crypto_data_t *vo_data;
crypto_data_t *vo_signature;
crypto_spi_ctx_template_t vo_templ;
} kcf_verify_ops_params_t;
typedef struct kcf_encrypt_mac_ops_params {
crypto_session_id_t em_sid;
crypto_mech_type_t em_framework_encr_mechtype;
crypto_mechanism_t em_encr_mech;
crypto_key_t *em_encr_key;
crypto_mech_type_t em_framework_mac_mechtype;
crypto_mechanism_t em_mac_mech;
crypto_key_t *em_mac_key;
crypto_data_t *em_plaintext;
crypto_dual_data_t *em_ciphertext;
crypto_data_t *em_mac;
crypto_spi_ctx_template_t em_encr_templ;
crypto_spi_ctx_template_t em_mac_templ;
} kcf_encrypt_mac_ops_params_t;
typedef struct kcf_mac_decrypt_ops_params {
crypto_session_id_t md_sid;
crypto_mech_type_t md_framework_mac_mechtype;
crypto_mechanism_t md_mac_mech;
crypto_key_t *md_mac_key;
crypto_mech_type_t md_framework_decr_mechtype;
crypto_mechanism_t md_decr_mech;
crypto_key_t *md_decr_key;
crypto_dual_data_t *md_ciphertext;
crypto_data_t *md_mac;
crypto_data_t *md_plaintext;
crypto_spi_ctx_template_t md_mac_templ;
crypto_spi_ctx_template_t md_decr_templ;
} kcf_mac_decrypt_ops_params_t;
typedef struct kcf_random_number_ops_params {
crypto_session_id_t rn_sid;
uchar_t *rn_buf;
size_t rn_buflen;
uint_t rn_entropy_est;
uint32_t rn_flags;
} kcf_random_number_ops_params_t;
/*
* so_pd is useful when the provider descriptor (pd) supplying the
* provider handle is different from the pd supplying the ops vector.
* This is the case for session open/close where so_pd can be the pd
* of a logical provider. The pd supplying the ops vector is passed
* as an argument to kcf_submit_request().
*/
typedef struct kcf_session_ops_params {
crypto_session_id_t *so_sid_ptr;
crypto_session_id_t so_sid;
crypto_user_type_t so_user_type;
char *so_pin;
size_t so_pin_len;
kcf_provider_desc_t *so_pd;
} kcf_session_ops_params_t;
typedef struct kcf_object_ops_params {
crypto_session_id_t oo_sid;
crypto_object_id_t oo_object_id;
crypto_object_attribute_t *oo_template;
uint_t oo_attribute_count;
crypto_object_id_t *oo_object_id_ptr;
size_t *oo_object_size;
void **oo_find_init_pp_ptr;
void *oo_find_pp;
uint_t oo_max_object_count;
uint_t *oo_object_count_ptr;
} kcf_object_ops_params_t;
/*
* ko_key is used to encode wrapping key in key_wrap() and
* unwrapping key in key_unwrap(). ko_key_template and
* ko_key_attribute_count are used to encode public template
* and public template attr count in key_generate_pair().
* kops->ko_key_object_id_ptr is used to encode public key
* in key_generate_pair().
*/
typedef struct kcf_key_ops_params {
crypto_session_id_t ko_sid;
crypto_mech_type_t ko_framework_mechtype;
crypto_mechanism_t ko_mech;
crypto_object_attribute_t *ko_key_template;
uint_t ko_key_attribute_count;
crypto_object_id_t *ko_key_object_id_ptr;
crypto_object_attribute_t *ko_private_key_template;
uint_t ko_private_key_attribute_count;
crypto_object_id_t *ko_private_key_object_id_ptr;
crypto_key_t *ko_key;
uchar_t *ko_wrapped_key;
size_t *ko_wrapped_key_len_ptr;
crypto_object_attribute_t *ko_out_template1;
crypto_object_attribute_t *ko_out_template2;
uint_t ko_out_attribute_count1;
uint_t ko_out_attribute_count2;
} kcf_key_ops_params_t;
/*
* po_pin and po_pin_len are used to encode new_pin and new_pin_len
* when wrapping set_pin() function parameters.
*
* po_pd is useful when the provider descriptor (pd) supplying the
* provider handle is different from the pd supplying the ops vector.
* This is true for the ext_info provider entry point where po_pd
* can be the pd of a logical provider. The pd supplying the ops vector
* is passed as an argument to kcf_submit_request().
*/
typedef struct kcf_provmgmt_ops_params {
crypto_session_id_t po_sid;
char *po_pin;
size_t po_pin_len;
char *po_old_pin;
size_t po_old_pin_len;
char *po_label;
crypto_provider_ext_info_t *po_ext_info;
kcf_provider_desc_t *po_pd;
} kcf_provmgmt_ops_params_t;
/*
* The operation type within a function group.
*/
typedef enum kcf_op_type {
/* common ops for all mechanisms */
KCF_OP_INIT = 1,
KCF_OP_SINGLE, /* pkcs11 sense. So, INIT is already done */
KCF_OP_UPDATE,
KCF_OP_FINAL,
KCF_OP_ATOMIC,
/* digest_key op */
KCF_OP_DIGEST_KEY,
/* mac specific op */
KCF_OP_MAC_VERIFY_ATOMIC,
/* mac/cipher specific op */
KCF_OP_MAC_VERIFY_DECRYPT_ATOMIC,
/* sign_recover ops */
KCF_OP_SIGN_RECOVER_INIT,
KCF_OP_SIGN_RECOVER,
KCF_OP_SIGN_RECOVER_ATOMIC,
/* verify_recover ops */
KCF_OP_VERIFY_RECOVER_INIT,
KCF_OP_VERIFY_RECOVER,
KCF_OP_VERIFY_RECOVER_ATOMIC,
/* random number ops */
KCF_OP_RANDOM_SEED,
KCF_OP_RANDOM_GENERATE,
/* session management ops */
KCF_OP_SESSION_OPEN,
KCF_OP_SESSION_CLOSE,
KCF_OP_SESSION_LOGIN,
KCF_OP_SESSION_LOGOUT,
/* object management ops */
KCF_OP_OBJECT_CREATE,
KCF_OP_OBJECT_COPY,
KCF_OP_OBJECT_DESTROY,
KCF_OP_OBJECT_GET_SIZE,
KCF_OP_OBJECT_GET_ATTRIBUTE_VALUE,
KCF_OP_OBJECT_SET_ATTRIBUTE_VALUE,
KCF_OP_OBJECT_FIND_INIT,
KCF_OP_OBJECT_FIND,
KCF_OP_OBJECT_FIND_FINAL,
/* key management ops */
KCF_OP_KEY_GENERATE,
KCF_OP_KEY_GENERATE_PAIR,
KCF_OP_KEY_WRAP,
KCF_OP_KEY_UNWRAP,
KCF_OP_KEY_DERIVE,
KCF_OP_KEY_CHECK,
/* provider management ops */
KCF_OP_MGMT_EXTINFO,
KCF_OP_MGMT_INITTOKEN,
KCF_OP_MGMT_INITPIN,
KCF_OP_MGMT_SETPIN
} kcf_op_type_t;
/*
* The operation groups that need wrapping of parameters. This is somewhat
* similar to the function group type in spi.h except that this also includes
* all the functions that don't have a mechanism.
*
* The wrapper macros should never take these enum values as an argument.
* Rather, they are assigned in the macro itself since they are known
* from the macro name.
*/
typedef enum kcf_op_group {
KCF_OG_DIGEST = 1,
KCF_OG_MAC,
KCF_OG_ENCRYPT,
KCF_OG_DECRYPT,
KCF_OG_SIGN,
KCF_OG_VERIFY,
KCF_OG_ENCRYPT_MAC,
KCF_OG_MAC_DECRYPT,
KCF_OG_RANDOM,
KCF_OG_SESSION,
KCF_OG_OBJECT,
KCF_OG_KEY,
KCF_OG_PROVMGMT,
KCF_OG_NOSTORE_KEY
} kcf_op_group_t;
/*
* The kcf_op_type_t enum values used here should be only for those
* operations for which there is a k-api routine in sys/crypto/api.h.
*/
#define IS_INIT_OP(ftype) ((ftype) == KCF_OP_INIT)
#define IS_SINGLE_OP(ftype) ((ftype) == KCF_OP_SINGLE)
#define IS_UPDATE_OP(ftype) ((ftype) == KCF_OP_UPDATE)
#define IS_FINAL_OP(ftype) ((ftype) == KCF_OP_FINAL)
#define IS_ATOMIC_OP(ftype) ( \
(ftype) == KCF_OP_ATOMIC || (ftype) == KCF_OP_MAC_VERIFY_ATOMIC || \
(ftype) == KCF_OP_MAC_VERIFY_DECRYPT_ATOMIC || \
(ftype) == KCF_OP_SIGN_RECOVER_ATOMIC || \
(ftype) == KCF_OP_VERIFY_RECOVER_ATOMIC)
/*
* Keep the parameters associated with a request around.
* We need to pass them to the SPI.
*/
typedef struct kcf_req_params {
kcf_op_group_t rp_opgrp;
kcf_op_type_t rp_optype;
union {
kcf_digest_ops_params_t digest_params;
kcf_mac_ops_params_t mac_params;
kcf_encrypt_ops_params_t encrypt_params;
kcf_decrypt_ops_params_t decrypt_params;
kcf_sign_ops_params_t sign_params;
kcf_verify_ops_params_t verify_params;
kcf_encrypt_mac_ops_params_t encrypt_mac_params;
kcf_mac_decrypt_ops_params_t mac_decrypt_params;
kcf_random_number_ops_params_t random_number_params;
kcf_session_ops_params_t session_params;
kcf_object_ops_params_t object_params;
kcf_key_ops_params_t key_params;
kcf_provmgmt_ops_params_t provmgmt_params;
} rp_u;
} kcf_req_params_t;
/*
* The ioctl/k-api code should bundle the parameters into a kcf_req_params_t
* structure before calling a scheduler routine. The following macros are
* available for that purpose.
*
* For the most part, the macro arguments closely correspond to the
* function parameters. In some cases, we use generic names. The comments
* for the structure should indicate these cases.
*/
#define KCF_WRAP_DIGEST_OPS_PARAMS(req, ftype, _sid, _mech, _key, \
_data, _digest) { \
kcf_digest_ops_params_t *dops = &(req)->rp_u.digest_params; \
crypto_mechanism_t *mechp = _mech; \
\
(req)->rp_opgrp = KCF_OG_DIGEST; \
(req)->rp_optype = ftype; \
dops->do_sid = _sid; \
if (mechp != NULL) { \
dops->do_mech = *mechp; \
dops->do_framework_mechtype = mechp->cm_type; \
} \
dops->do_digest_key = _key; \
dops->do_data = _data; \
dops->do_digest = _digest; \
}
#define KCF_WRAP_MAC_OPS_PARAMS(req, ftype, _sid, _mech, _key, \
_data, _mac, _templ) { \
kcf_mac_ops_params_t *mops = &(req)->rp_u.mac_params; \
crypto_mechanism_t *mechp = _mech; \
\
(req)->rp_opgrp = KCF_OG_MAC; \
(req)->rp_optype = ftype; \
mops->mo_sid = _sid; \
if (mechp != NULL) { \
mops->mo_mech = *mechp; \
mops->mo_framework_mechtype = mechp->cm_type; \
} \
mops->mo_key = _key; \
mops->mo_data = _data; \
mops->mo_mac = _mac; \
mops->mo_templ = _templ; \
}
#define KCF_WRAP_ENCRYPT_OPS_PARAMS(req, ftype, _sid, _mech, _key, \
_plaintext, _ciphertext, _templ) { \
kcf_encrypt_ops_params_t *cops = &(req)->rp_u.encrypt_params; \
crypto_mechanism_t *mechp = _mech; \
\
(req)->rp_opgrp = KCF_OG_ENCRYPT; \
(req)->rp_optype = ftype; \
cops->eo_sid = _sid; \
if (mechp != NULL) { \
cops->eo_mech = *mechp; \
cops->eo_framework_mechtype = mechp->cm_type; \
} \
cops->eo_key = _key; \
cops->eo_plaintext = _plaintext; \
cops->eo_ciphertext = _ciphertext; \
cops->eo_templ = _templ; \
}
#define KCF_WRAP_DECRYPT_OPS_PARAMS(req, ftype, _sid, _mech, _key, \
_ciphertext, _plaintext, _templ) { \
kcf_decrypt_ops_params_t *cops = &(req)->rp_u.decrypt_params; \
crypto_mechanism_t *mechp = _mech; \
\
(req)->rp_opgrp = KCF_OG_DECRYPT; \
(req)->rp_optype = ftype; \
cops->dop_sid = _sid; \
if (mechp != NULL) { \
cops->dop_mech = *mechp; \
cops->dop_framework_mechtype = mechp->cm_type; \
} \
cops->dop_key = _key; \
cops->dop_ciphertext = _ciphertext; \
cops->dop_plaintext = _plaintext; \
cops->dop_templ = _templ; \
}
#define KCF_WRAP_SIGN_OPS_PARAMS(req, ftype, _sid, _mech, _key, \
_data, _signature, _templ) { \
kcf_sign_ops_params_t *sops = &(req)->rp_u.sign_params; \
crypto_mechanism_t *mechp = _mech; \
\
(req)->rp_opgrp = KCF_OG_SIGN; \
(req)->rp_optype = ftype; \
sops->so_sid = _sid; \
if (mechp != NULL) { \
sops->so_mech = *mechp; \
sops->so_framework_mechtype = mechp->cm_type; \
} \
sops->so_key = _key; \
sops->so_data = _data; \
sops->so_signature = _signature; \
sops->so_templ = _templ; \
}
#define KCF_WRAP_VERIFY_OPS_PARAMS(req, ftype, _sid, _mech, _key, \
_data, _signature, _templ) { \
kcf_verify_ops_params_t *vops = &(req)->rp_u.verify_params; \
crypto_mechanism_t *mechp = _mech; \
\
(req)->rp_opgrp = KCF_OG_VERIFY; \
(req)->rp_optype = ftype; \
vops->vo_sid = _sid; \
if (mechp != NULL) { \
vops->vo_mech = *mechp; \
vops->vo_framework_mechtype = mechp->cm_type; \
} \
vops->vo_key = _key; \
vops->vo_data = _data; \
vops->vo_signature = _signature; \
vops->vo_templ = _templ; \
}
#define KCF_WRAP_ENCRYPT_MAC_OPS_PARAMS(req, ftype, _sid, _encr_key, \
_mac_key, _plaintext, _ciphertext, _mac, _encr_templ, _mac_templ) { \
kcf_encrypt_mac_ops_params_t *cmops = &(req)->rp_u.encrypt_mac_params; \
\
(req)->rp_opgrp = KCF_OG_ENCRYPT_MAC; \
(req)->rp_optype = ftype; \
cmops->em_sid = _sid; \
cmops->em_encr_key = _encr_key; \
cmops->em_mac_key = _mac_key; \
cmops->em_plaintext = _plaintext; \
cmops->em_ciphertext = _ciphertext; \
cmops->em_mac = _mac; \
cmops->em_encr_templ = _encr_templ; \
cmops->em_mac_templ = _mac_templ; \
}
#define KCF_WRAP_MAC_DECRYPT_OPS_PARAMS(req, ftype, _sid, _mac_key, \
_decr_key, _ciphertext, _mac, _plaintext, _mac_templ, _decr_templ) { \
kcf_mac_decrypt_ops_params_t *cmops = &(req)->rp_u.mac_decrypt_params; \
\
(req)->rp_opgrp = KCF_OG_MAC_DECRYPT; \
(req)->rp_optype = ftype; \
cmops->md_sid = _sid; \
cmops->md_mac_key = _mac_key; \
cmops->md_decr_key = _decr_key; \
cmops->md_ciphertext = _ciphertext; \
cmops->md_mac = _mac; \
cmops->md_plaintext = _plaintext; \
cmops->md_mac_templ = _mac_templ; \
cmops->md_decr_templ = _decr_templ; \
}
#define KCF_WRAP_RANDOM_OPS_PARAMS(req, ftype, _sid, _buf, _buflen, \
_est, _flags) { \
kcf_random_number_ops_params_t *rops = \
&(req)->rp_u.random_number_params; \
\
(req)->rp_opgrp = KCF_OG_RANDOM; \
(req)->rp_optype = ftype; \
rops->rn_sid = _sid; \
rops->rn_buf = _buf; \
rops->rn_buflen = _buflen; \
rops->rn_entropy_est = _est; \
rops->rn_flags = _flags; \
}
#define KCF_WRAP_SESSION_OPS_PARAMS(req, ftype, _sid_ptr, _sid, \
_user_type, _pin, _pin_len, _pd) { \
kcf_session_ops_params_t *sops = &(req)->rp_u.session_params; \
\
(req)->rp_opgrp = KCF_OG_SESSION; \
(req)->rp_optype = ftype; \
sops->so_sid_ptr = _sid_ptr; \
sops->so_sid = _sid; \
sops->so_user_type = _user_type; \
sops->so_pin = _pin; \
sops->so_pin_len = _pin_len; \
sops->so_pd = _pd; \
}
#define KCF_WRAP_OBJECT_OPS_PARAMS(req, ftype, _sid, _object_id, \
_template, _attribute_count, _object_id_ptr, _object_size, \
_find_init_pp_ptr, _find_pp, _max_object_count, _object_count_ptr) { \
kcf_object_ops_params_t *jops = &(req)->rp_u.object_params; \
\
(req)->rp_opgrp = KCF_OG_OBJECT; \
(req)->rp_optype = ftype; \
jops->oo_sid = _sid; \
jops->oo_object_id = _object_id; \
jops->oo_template = _template; \
jops->oo_attribute_count = _attribute_count; \
jops->oo_object_id_ptr = _object_id_ptr; \
jops->oo_object_size = _object_size; \
jops->oo_find_init_pp_ptr = _find_init_pp_ptr; \
jops->oo_find_pp = _find_pp; \
jops->oo_max_object_count = _max_object_count; \
jops->oo_object_count_ptr = _object_count_ptr; \
}
#define KCF_WRAP_KEY_OPS_PARAMS(req, ftype, _sid, _mech, _key_template, \
_key_attribute_count, _key_object_id_ptr, _private_key_template, \
_private_key_attribute_count, _private_key_object_id_ptr, \
_key, _wrapped_key, _wrapped_key_len_ptr) { \
kcf_key_ops_params_t *kops = &(req)->rp_u.key_params; \
crypto_mechanism_t *mechp = _mech; \
\
(req)->rp_opgrp = KCF_OG_KEY; \
(req)->rp_optype = ftype; \
kops->ko_sid = _sid; \
if (mechp != NULL) { \
kops->ko_mech = *mechp; \
kops->ko_framework_mechtype = mechp->cm_type; \
} \
kops->ko_key_template = _key_template; \
kops->ko_key_attribute_count = _key_attribute_count; \
kops->ko_key_object_id_ptr = _key_object_id_ptr; \
kops->ko_private_key_template = _private_key_template; \
kops->ko_private_key_attribute_count = _private_key_attribute_count; \
kops->ko_private_key_object_id_ptr = _private_key_object_id_ptr; \
kops->ko_key = _key; \
kops->ko_wrapped_key = _wrapped_key; \
kops->ko_wrapped_key_len_ptr = _wrapped_key_len_ptr; \
}
#define KCF_WRAP_PROVMGMT_OPS_PARAMS(req, ftype, _sid, _old_pin, \
_old_pin_len, _pin, _pin_len, _label, _ext_info, _pd) { \
kcf_provmgmt_ops_params_t *pops = &(req)->rp_u.provmgmt_params; \
\
(req)->rp_opgrp = KCF_OG_PROVMGMT; \
(req)->rp_optype = ftype; \
pops->po_sid = _sid; \
pops->po_pin = _pin; \
pops->po_pin_len = _pin_len; \
pops->po_old_pin = _old_pin; \
pops->po_old_pin_len = _old_pin_len; \
pops->po_label = _label; \
pops->po_ext_info = _ext_info; \
pops->po_pd = _pd; \
}
#define KCF_WRAP_NOSTORE_KEY_OPS_PARAMS(req, ftype, _sid, _mech, \
_key_template, _key_attribute_count, _private_key_template, \
_private_key_attribute_count, _key, _out_template1, \
_out_attribute_count1, _out_template2, _out_attribute_count2) { \
kcf_key_ops_params_t *kops = &(req)->rp_u.key_params; \
crypto_mechanism_t *mechp = _mech; \
\
(req)->rp_opgrp = KCF_OG_NOSTORE_KEY; \
(req)->rp_optype = ftype; \
kops->ko_sid = _sid; \
if (mechp != NULL) { \
kops->ko_mech = *mechp; \
kops->ko_framework_mechtype = mechp->cm_type; \
} \
kops->ko_key_template = _key_template; \
kops->ko_key_attribute_count = _key_attribute_count; \
kops->ko_key_object_id_ptr = NULL; \
kops->ko_private_key_template = _private_key_template; \
kops->ko_private_key_attribute_count = _private_key_attribute_count; \
kops->ko_private_key_object_id_ptr = NULL; \
kops->ko_key = _key; \
kops->ko_wrapped_key = NULL; \
kops->ko_wrapped_key_len_ptr = 0; \
kops->ko_out_template1 = _out_template1; \
kops->ko_out_template2 = _out_template2; \
kops->ko_out_attribute_count1 = _out_attribute_count1; \
kops->ko_out_attribute_count2 = _out_attribute_count2; \
}
/*
 * Translate framework mechanism type 'fmtype' to provider 'pd's private
 * mechanism number and store it in (mechp)->cm_type.
 * NOTE(review): the expansion ends in a semicolon, so callers must not
 * add their own in contexts such as unbraced if/else bodies.
 */
#define KCF_SET_PROVIDER_MECHNUM(fmtype, pd, mechp) \
	(mechp)->cm_type = \
	KCF_TO_PROV_MECHNUM(pd, fmtype);
#ifdef __cplusplus
}
#endif
#endif /* _SYS_CRYPTO_OPS_IMPL_H */

View File

@ -0,0 +1,531 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _SYS_CRYPTO_SCHED_IMPL_H
#define _SYS_CRYPTO_SCHED_IMPL_H
/*
* Scheduler internal structures.
*/
#ifdef __cplusplus
extern "C" {
#endif
#include <sys/zfs_context.h>
#include <sys/crypto/api.h>
#include <sys/crypto/spi.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/common.h>
#include <sys/crypto/ops_impl.h>
/* Completion-callback type invoked with (argument, error status). */
typedef void (kcf_func_t)(void *, int);
/* Life-cycle states of a request as it moves through the scheduler. */
typedef enum kcf_req_status {
	REQ_ALLOCATED = 1,
	REQ_WAITING,		/* At the framework level */
	REQ_INPROGRESS,		/* At the provider level */
	REQ_DONE,
	REQ_CANCELED
} kcf_req_status_t;
/* How the client submitted the request: blocking or queued/callback. */
typedef enum kcf_call_type {
	CRYPTO_SYNCH = 1,
	CRYPTO_ASYNCH
} kcf_call_type_t;
/* Is the request flagged as coming from a restricted context? */
#define CHECK_RESTRICT(crq) ((crq) != NULL && \
	((crq)->cr_flag & CRYPTO_RESTRICTED))
#define CHECK_RESTRICT_FALSE B_FALSE
/*
 * Can this request take the synchronous fast path? True when the
 * client did not set CRYPTO_ALWAYS_QUEUE and pd is a software provider.
 *
 * Fix: the whole expansion is now parenthesized. Previously the final
 * `&& (pd)->pd_prov_type == ...` term sat outside the parentheses, so
 * uses such as `!CHECK_FASTPATH(crq, pd)` or combining the macro with
 * other operators would mis-parse (CERT PRE02-C).
 */
#define CHECK_FASTPATH(crq, pd) (((crq) == NULL || \
	!((crq)->cr_flag & CRYPTO_ALWAYS_QUEUE)) && \
	(pd)->pd_prov_type == CRYPTO_SW_PROVIDER)
/* Allocation flag: sleeping is allowed only for synchronous (NULL crq). */
#define KCF_KMFLAG(crq) (((crq) == NULL) ? KM_SLEEP : KM_NOSLEEP)
/*
 * The framework keeps an internal handle to use in the adaptive
 * asynchronous case. This is the case when a client has the
 * CRYPTO_ALWAYS_QUEUE bit clear and a software provider is used for
 * the request. The request is completed in the context of the calling
 * thread and kernel memory must be allocated with KM_NOSLEEP.
 *
 * The framework passes a pointer to the handle in crypto_req_handle_t
 * argument when it calls the SPI of the software provider. The macros
 * KCF_RHNDL() and KCF_SWFP_RHNDL() are used to do this.
 *
 * When a provider asks the framework for kmflag value via
 * crypto_kmflag(9S) we use REQHNDL2_KMFLAG() macro.
 */
extern ulong_t kcf_swprov_hndl;
/* Pick the handle from the desired allocation policy / call req. */
#define KCF_RHNDL(kmflag) (((kmflag) == KM_SLEEP) ? NULL : &kcf_swprov_hndl)
#define KCF_SWFP_RHNDL(crq) (((crq) == NULL) ? NULL : &kcf_swprov_hndl)
/* Map a request handle back to the kmflag a provider should use. */
#define REQHNDL2_KMFLAG(rhndl) \
	((rhndl == &kcf_swprov_hndl) ? KM_NOSLEEP : KM_SLEEP)
/* Internal call_req flags. They start after the public ones in api.h */
#define CRYPTO_SETDUAL 0x00001000 /* Set the 'cont' boolean before */
					/* submitting the request */
/*
 * Is this call request part of a dual operation?
 * Fix: parenthesize the macro argument inside the expansion
 * ((crq)->cr_flag, not crq->cr_flag) so non-trivial argument
 * expressions expand correctly (CERT PRE01-C).
 */
#define KCF_ISDUALREQ(crq) \
	(((crq) == NULL) ? B_FALSE : ((crq)->cr_flag & CRYPTO_SETDUAL))
/*
 * List node remembering a provider already tried for a request, so a
 * retry after a recoverable failure can pick a different provider.
 */
typedef struct kcf_prov_tried {
	kcf_provider_desc_t	*pt_pd;		/* provider that was tried */
	struct kcf_prov_tried	*pt_next;	/* next node in the list */
} kcf_prov_tried_t;
/* Does mechanism descriptor 'mdesc' support function group 'fg'? */
#define IS_FG_SUPPORTED(mdesc, fg) \
	(((mdesc)->pm_mech_info.cm_func_group_mask & (fg)) != 0)
/*
 * Fix: macro arguments below are now fully parenthesized in the
 * expansions (CERT PRE01-C) so they remain correct for any argument
 * expression; behavior for existing call sites is unchanged.
 */
#define IS_PROVIDER_TRIED(pd, tlist) \
	((tlist) != NULL && is_in_triedlist((pd), (tlist)))
/* Errors after which trying another provider may succeed. */
#define IS_RECOVERABLE(error) \
	((error) == CRYPTO_BUFFER_TOO_BIG || \
	(error) == CRYPTO_BUSY || \
	(error) == CRYPTO_DEVICE_ERROR || \
	(error) == CRYPTO_DEVICE_MEMORY || \
	(error) == CRYPTO_KEY_SIZE_RANGE || \
	(error) == CRYPTO_NO_PERMISSION)
#define KCF_ATOMIC_INCR(x) atomic_add_32(&(x), 1)
#define KCF_ATOMIC_DECR(x) atomic_add_32(&(x), -1)
/*
 * Node structure for synchronous requests.
 */
typedef struct kcf_sreq_node {
	/* Should always be the first field in this structure */
	kcf_call_type_t sn_type;
	/*
	 * sn_cv and sn_lock are used to wait for the
	 * operation to complete. sn_lock also protects
	 * the sn_state field.
	 */
	kcondvar_t sn_cv;
	kmutex_t sn_lock;
	kcf_req_status_t sn_state;
	/*
	 * Return value from the operation. This will be
	 * one of the CRYPTO_* errors defined in common.h.
	 */
	int sn_rv;
	/*
	 * parameters to call the SPI with. This can be
	 * a pointer as we know the caller context/stack stays.
	 */
	struct kcf_req_params *sn_params;
	/* Internal context for this request */
	struct kcf_context *sn_context;
	/* Provider handling this request */
	kcf_provider_desc_t *sn_provider;
} kcf_sreq_node_t;
/*
 * Node structure for asynchronous requests. A node can be on
 * a chain of requests hanging off the internal context
 * structure and can be in the global software provider queue.
 */
typedef struct kcf_areq_node {
	/* Should always be the first field in this structure */
	kcf_call_type_t an_type;
	/* an_lock protects the field an_state */
	kmutex_t an_lock;
	kcf_req_status_t an_state;
	crypto_call_req_t an_reqarg;
	/*
	 * parameters to call the SPI with. We need to
	 * save the params since the caller stack can go away.
	 */
	struct kcf_req_params an_params;
	/*
	 * The next two fields should be NULL for operations that
	 * don't need a context.
	 */
	/* Internal context for this request */
	struct kcf_context *an_context;
	/* next in chain of requests for context */
	struct kcf_areq_node *an_ctxchain_next;
	kcondvar_t an_turn_cv;
	boolean_t an_is_my_turn;
	boolean_t an_isdual; /* for internal reuse */
	/*
	 * Next and previous nodes in the global software
	 * queue. These fields are NULL for a hardware
	 * provider since we use a taskq there.
	 */
	struct kcf_areq_node *an_next;
	struct kcf_areq_node *an_prev;
	/* Provider handling this request */
	kcf_provider_desc_t *an_provider;
	kcf_prov_tried_t *an_tried_plist;
	struct kcf_areq_node *an_idnext; /* Next in ID hash */
	struct kcf_areq_node *an_idprev; /* Prev in ID hash */
	kcondvar_t an_done; /* Signal request completion */
	uint_t an_refcnt;
} kcf_areq_node_t;
/* Take a reference on an areq node; the count must never wrap to 0. */
#define KCF_AREQ_REFHOLD(areq) { \
	atomic_add_32(&(areq)->an_refcnt, 1); \
	ASSERT((areq)->an_refcnt != 0); \
}
/*
 * Drop a reference; the node is freed when the last one goes.
 * membar_exit() orders prior stores before the release decrement.
 */
#define KCF_AREQ_REFRELE(areq) { \
	ASSERT((areq)->an_refcnt != 0); \
	membar_exit(); \
	if (atomic_add_32_nv(&(areq)->an_refcnt, -1) == 0) \
		kcf_free_req(areq); \
}
/* Read the call type stored as the first field of either node type. */
#define GET_REQ_TYPE(arg) *((kcf_call_type_t *)(arg))
/*
 * Invoke the client's completion callback with the final error.
 * NOTE(review): the expansion ends in a semicolon; avoid using this
 * in unbraced if/else bodies.
 */
#define NOTIFY_CLIENT(areq, err) (*(areq)->an_reqarg.cr_callback_func)(\
	(areq)->an_reqarg.cr_callback_arg, err);
/* For internally generated call requests for dual operations */
typedef struct kcf_call_req {
	crypto_call_req_t kr_callreq; /* external client call req */
	kcf_req_params_t kr_params; /* Params saved for next call */
	kcf_areq_node_t *kr_areq; /* Use this areq */
	off_t kr_saveoffset;
	size_t kr_savelen;
} kcf_dual_req_t;
/*
 * The following are somewhat similar to macros in callo.h, which implement
 * callout tables.
 *
 * The lower four bits of the ID are used to encode the table ID to
 * index in to. The REQID_COUNTER_HIGH bit is used to avoid any check for
 * wrap around when generating ID. We assume that there won't be a request
 * which takes more time than 2^(sizeof (long) - 5) other requests submitted
 * after it. This ensures there won't be any ID collision.
 * NOTE(review): the exponent above is as written originally (was "2^^");
 * it presumably means 2^(8 * sizeof (long) - 5) bits of counter headroom
 * — confirm against the ID layout below.
 */
#define REQID_COUNTER_HIGH (1UL << (8 * sizeof (long) - 1))
#define REQID_COUNTER_SHIFT 4
#define REQID_COUNTER_LOW (1 << REQID_COUNTER_SHIFT)
#define REQID_TABLES 16
#define REQID_TABLE_MASK (REQID_TABLES - 1)
#define REQID_BUCKETS 512
#define REQID_BUCKET_MASK (REQID_BUCKETS - 1)
#define REQID_HASH(id) (((id) >> REQID_COUNTER_SHIFT) & REQID_BUCKET_MASK)
#define GET_REQID(areq) (areq)->an_reqarg.cr_reqid
#define SET_REQID(areq, val) GET_REQID(areq) = val
/*
 * Hash table for async requests.
 */
typedef struct kcf_reqid_table {
	kmutex_t rt_lock;	/* presumably guards rt_curid/rt_idhash — confirm */
	crypto_req_id_t rt_curid;	/* last ID handed out for this table */
	kcf_areq_node_t *rt_idhash[REQID_BUCKETS];
} kcf_reqid_table_t;
/*
 * Global software provider queue structure. Requests to be
 * handled by a SW provider and have the ALWAYS_QUEUE flag set
 * get queued here.
 */
typedef struct kcf_global_swq {
	/*
	 * gs_cv and gs_lock are used to wait for new requests.
	 * gs_lock protects the changes to the queue.
	 */
	kcondvar_t gs_cv;
	kmutex_t gs_lock;
	uint_t gs_njobs;
	uint_t gs_maxjobs;
	kcf_areq_node_t *gs_first;
	kcf_areq_node_t *gs_last;
} kcf_global_swq_t;
/*
 * Internal representation of a canonical context. We contain crypto_ctx_t
 * structure in order to have just one memory allocation. The SPI
 * ((crypto_ctx_t *)ctx)->cc_framework_private maps to this structure.
 */
typedef struct kcf_context {
	crypto_ctx_t kc_glbl_ctx;
	uint_t kc_refcnt;
	kmutex_t kc_in_use_lock;
	/*
	 * kc_req_chain_first and kc_req_chain_last are used to chain
	 * multiple async requests using the same context. They should be
	 * NULL for sync requests.
	 */
	kcf_areq_node_t *kc_req_chain_first;
	kcf_areq_node_t *kc_req_chain_last;
	kcf_provider_desc_t *kc_prov_desc; /* Prov. descriptor */
	kcf_provider_desc_t *kc_sw_prov_desc; /* Prov. descriptor */
	kcf_mech_entry_t *kc_mech;
	struct kcf_context *kc_secondctx; /* for dual contexts */
} kcf_context_t;
/*
 * Bump up the reference count on the framework private context. A
 * global context or a request that references this structure should
 * do a hold.
 */
#define KCF_CONTEXT_REFHOLD(ictx) { \
	atomic_add_32(&(ictx)->kc_refcnt, 1); \
	ASSERT((ictx)->kc_refcnt != 0); \
}
/*
 * Decrement the reference count on the framework private context.
 * When the last reference is released, the framework private
 * context structure is freed along with the global context.
 * membar_exit() orders prior stores before the release decrement.
 */
#define KCF_CONTEXT_REFRELE(ictx) { \
	ASSERT((ictx)->kc_refcnt != 0); \
	membar_exit(); \
	if (atomic_add_32_nv(&(ictx)->kc_refcnt, -1) == 0) \
		kcf_free_context(ictx); \
}
/*
 * Check if we can release the context now. In case of CRYPTO_QUEUED
 * we do not release it as we can do it only after the provider notified
 * us. In case of CRYPTO_BUSY, the client can retry the request using
 * the context, so we do not release the context.
 *
 * This macro should be called only from the final routine in
 * an init/update/final sequence. We do not release the context in case
 * of update operations. We require the consumer to free it
 * explicitly, in case it wants to abandon the operation. This is done
 * as there may be mechanisms in ECB mode that can continue even if
 * an operation on a block fails.
 */
#define KCF_CONTEXT_COND_RELEASE(rv, kcf_ctx) { \
	if (KCF_CONTEXT_DONE(rv)) \
		KCF_CONTEXT_REFRELE(kcf_ctx); \
}
/*
 * This macro determines whether we're done with a context.
 */
#define KCF_CONTEXT_DONE(rv) \
	((rv) != CRYPTO_QUEUED && (rv) != CRYPTO_BUSY && \
	(rv) != CRYPTO_BUFFER_TOO_SMALL)
/*
 * A crypto_ctx_template_t is internally a pointer to this struct
 */
typedef struct kcf_ctx_template {
	crypto_kcf_provider_handle_t ct_prov_handle;	/* provider handle */
	uint_t ct_generation;		/* generation # */
	size_t ct_size;			/* for freeing */
	crypto_spi_ctx_template_t ct_prov_tmpl;	/* context template */
					/* from the SW prov */
} kcf_ctx_template_t;
/*
 * Structure for pool of threads working on global software queue.
 */
typedef struct kcf_pool {
	uint32_t kp_threads;		/* Number of threads in pool */
	uint32_t kp_idlethreads;	/* Idle threads in pool */
	uint32_t kp_blockedthreads;	/* Blocked threads in pool */
	/*
	 * cv & lock to monitor the condition when no threads
	 * are around. In this case the failover thread kicks in.
	 */
	kcondvar_t kp_nothr_cv;
	kmutex_t kp_thread_lock;
	/* Userspace thread creator variables. */
	boolean_t kp_signal_create_thread; /* Create requested flag */
	int kp_nthrs;			/* # of threads to create */
	boolean_t kp_user_waiting;	/* Thread waiting for work */
	/*
	 * cv & lock for the condition where more threads need to be
	 * created. kp_user_lock also protects the three fields above.
	 */
	kcondvar_t kp_user_cv;		/* Creator cond. variable */
	kmutex_t kp_user_lock;		/* Creator lock */
} kcf_pool_t;
/*
 * State of a crypto bufcall element.
 */
typedef enum cbuf_state {
	CBUF_FREE = 1,
	CBUF_WAITING,
	CBUF_RUNNING
} cbuf_state_t;
/*
 * Structure of a crypto bufcall element.
 */
typedef struct kcf_cbuf_elem {
	/*
	 * lock and cv to wait for CBUF_RUNNING to be done
	 * kc_lock also protects kc_state.
	 */
	kmutex_t kc_lock;
	kcondvar_t kc_cv;
	cbuf_state_t kc_state;
	struct kcf_cbuf_elem *kc_next;
	struct kcf_cbuf_elem *kc_prev;
	void (*kc_func)(void *arg);	/* callback to run */
	void *kc_arg;			/* argument passed to kc_func */
} kcf_cbuf_elem_t;
/*
 * State of a notify element.
 */
typedef enum ntfy_elem_state {
	NTFY_WAITING = 1,
	NTFY_RUNNING
} ntfy_elem_state_t;
/*
 * Structure of a notify list element.
 */
typedef struct kcf_ntfy_elem {
	/*
	 * lock and cv to wait for NTFY_RUNNING to be done.
	 * kn_lock also protects kn_state.
	 */
	kmutex_t kn_lock;
	kcondvar_t kn_cv;
	ntfy_elem_state_t kn_state;
	struct kcf_ntfy_elem *kn_next;
	struct kcf_ntfy_elem *kn_prev;
	crypto_notify_callback_t kn_func;	/* subscriber callback */
	uint32_t kn_event_mask;			/* events kn_func wants */
} kcf_ntfy_elem_t;
/*
 * The following values are based on the assumption that it would
 * take around eight cpus to load a hardware provider (This is true for
 * at least one product) and a kernel client may come from different
 * low-priority interrupt levels. We will have CYRPTO_TASKQ_MIN number
 * of cached taskq entries. The CRYPTO_TASKQ_MAX number is based on
 * a throughput of 1GB/s using 512-byte buffers. These are just
 * reasonable estimates and might need to change in future.
 *
 * NOTE: "CYRPTO_TASKQ_MIN" is a historic misspelling of CRYPTO_;
 * the name is kept because existing code references it.
 *
 * Fix: CRYPTO_TASKQ_MAX is now parenthesized so the macro expands
 * safely inside larger expressions (previously `x % CRYPTO_TASKQ_MAX`
 * or `CRYPTO_TASKQ_MAX + 1`-style uses could mis-parse).
 */
#define CRYPTO_TASKQ_THREADS 8
#define CYRPTO_TASKQ_MIN 64
#define CRYPTO_TASKQ_MAX (2 * 1024 * 1024)
/* Taskq tuning knobs (settable at module load). */
extern int crypto_taskq_threads;
extern int crypto_taskq_minalloc;
extern int crypto_taskq_maxalloc;
/* The single global software-provider queue and its thread limits. */
extern kcf_global_swq_t *gswq;
extern int kcf_maxthreads;
extern int kcf_minthreads;
/*
 * All pending crypto bufcalls are put on a list. cbuf_list_lock
 * protects changes to this list.
 */
extern kmutex_t cbuf_list_lock;
extern kcondvar_t cbuf_list_cv;
/*
 * All event subscribers are put on a list. ntfy_list_lock
 * protects changes to this list.
 */
extern kmutex_t ntfy_list_lock;
extern kcondvar_t ntfy_list_cv;
/* Provider selection and retry-list management. */
boolean_t kcf_get_next_logical_provider_member(kcf_provider_desc_t *,
    kcf_provider_desc_t *, kcf_provider_desc_t **);
extern int kcf_get_hardware_provider(crypto_mech_type_t, crypto_mech_type_t,
    boolean_t, kcf_provider_desc_t *, kcf_provider_desc_t **,
    crypto_func_group_t);
extern int kcf_get_hardware_provider_nomech(offset_t, offset_t,
    boolean_t, kcf_provider_desc_t *, kcf_provider_desc_t **);
extern void kcf_free_triedlist(kcf_prov_tried_t *);
extern kcf_prov_tried_t *kcf_insert_triedlist(kcf_prov_tried_t **,
    kcf_provider_desc_t *, int);
extern kcf_provider_desc_t *kcf_get_mech_provider(crypto_mech_type_t,
    kcf_mech_entry_t **, int *, kcf_prov_tried_t *, crypto_func_group_t,
    boolean_t, size_t);
extern kcf_provider_desc_t *kcf_get_dual_provider(crypto_mechanism_t *,
    crypto_mechanism_t *, kcf_mech_entry_t **, crypto_mech_type_t *,
    crypto_mech_type_t *, int *, kcf_prov_tried_t *,
    crypto_func_group_t, crypto_func_group_t, boolean_t, size_t);
/* Context allocation and request submission/completion. */
extern crypto_ctx_t *kcf_new_ctx(crypto_call_req_t *, kcf_provider_desc_t *,
    crypto_session_id_t);
extern int kcf_submit_request(kcf_provider_desc_t *, crypto_ctx_t *,
    crypto_call_req_t *, kcf_req_params_t *, boolean_t);
extern void kcf_sched_destroy(void);
extern void kcf_sched_init(void);
extern void kcf_sched_start(void);
extern void kcf_sop_done(kcf_sreq_node_t *, int);
extern void kcf_aop_done(kcf_areq_node_t *, int);
extern int common_submit_request(kcf_provider_desc_t *,
    crypto_ctx_t *, kcf_req_params_t *, crypto_req_handle_t);
extern void kcf_free_context(kcf_context_t *);
/* Thread-pool service loop and provider signature verification. */
extern int kcf_svc_wait(int *);
extern int kcf_svc_do_run(void);
extern int kcf_need_signature_verification(kcf_provider_desc_t *);
extern void kcf_verify_signature(void *);
extern struct modctl *kcf_get_modctl(crypto_provider_info_t *);
extern void verify_unverified_providers(void);
extern void kcf_free_req(kcf_areq_node_t *areq);
/* Bufcall and event-notification services. */
extern void crypto_bufcall_service(void);
extern void kcf_walk_ntfylist(uint32_t, void *);
extern void kcf_do_notify(kcf_provider_desc_t *, boolean_t);
/* Dual-operation request helpers. */
extern kcf_dual_req_t *kcf_alloc_req(crypto_call_req_t *);
extern void kcf_next_req(void *, int);
extern void kcf_last_req(void *, int);
#ifdef __cplusplus
}
#endif
#endif /* _SYS_CRYPTO_SCHED_IMPL_H */

View File

@ -0,0 +1,721 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _SYS_CRYPTO_SPI_H
#define _SYS_CRYPTO_SPI_H
/*
* CSPI: Cryptographic Service Provider Interface.
*/
#include <sys/zfs_context.h>
#include <sys/crypto/common.h>
#ifdef __cplusplus
extern "C" {
#endif
/* SPI interface versions a provider may declare support for. */
#define CRYPTO_SPI_VERSION_1 1
#define CRYPTO_SPI_VERSION_2 2
#define CRYPTO_SPI_VERSION_3 3
/*
 * Provider-private handle. This handle is specified by a provider
 * when it registers by means of the pi_provider_handle field of
 * the crypto_provider_info structure, and passed to the provider
 * when its entry points are invoked.
 */
typedef void *crypto_provider_handle_t;
/*
 * Context templates can be used by software providers to pre-process
 * keying material, such as key schedules. They are allocated by
 * a software provider create_ctx_template(9E) entry point, and passed
 * as argument to initialization and atomic provider entry points.
 */
typedef void *crypto_spi_ctx_template_t;
/*
 * Request handles are used by the kernel to identify an asynchronous
 * request being processed by a provider. It is passed by the kernel
 * to a hardware provider when submitting a request, and must be
 * specified by a provider when calling crypto_op_notification(9F)
 */
typedef void *crypto_req_handle_t;
/* Values for cc_flags field */
#define CRYPTO_INIT_OPSTATE 0x00000001 /* allocate and init cc_opstate */
#define CRYPTO_USE_OPSTATE 0x00000002 /* .. start using it as context */
/*
 * The context structure is passed from the kernel to a provider.
 * It contains the information needed to process a multi-part or
 * single part operation. The context structure is not used
 * by atomic operations.
 *
 * Parameters needed to perform a cryptographic operation, such
 * as keys, mechanisms, input and output buffers, are passed
 * as separate arguments to Provider routines.
 */
typedef struct crypto_ctx {
	crypto_provider_handle_t cc_provider;
	crypto_session_id_t cc_session;
	void *cc_provider_private; /* owned by provider */
	void *cc_framework_private; /* owned by framework */
	uint32_t cc_flags; /* flags */
	void *cc_opstate; /* state */
} crypto_ctx_t;
/*
 * Extended provider information.
 */
/*
 * valid values for ei_flags field of extended info structure
 * They match the RSA Security, Inc PKCS#11 tokenInfo flags.
 */
#define CRYPTO_EXTF_RNG 0x00000001
#define CRYPTO_EXTF_WRITE_PROTECTED 0x00000002
#define CRYPTO_EXTF_LOGIN_REQUIRED 0x00000004
#define CRYPTO_EXTF_USER_PIN_INITIALIZED 0x00000008
#define CRYPTO_EXTF_CLOCK_ON_TOKEN 0x00000040
#define CRYPTO_EXTF_PROTECTED_AUTHENTICATION_PATH 0x00000100
#define CRYPTO_EXTF_DUAL_CRYPTO_OPERATIONS 0x00000200
#define CRYPTO_EXTF_TOKEN_INITIALIZED 0x00000400
#define CRYPTO_EXTF_USER_PIN_COUNT_LOW 0x00010000
#define CRYPTO_EXTF_USER_PIN_FINAL_TRY 0x00020000
#define CRYPTO_EXTF_USER_PIN_LOCKED 0x00040000
#define CRYPTO_EXTF_USER_PIN_TO_BE_CHANGED 0x00080000
#define CRYPTO_EXTF_SO_PIN_COUNT_LOW 0x00100000
#define CRYPTO_EXTF_SO_PIN_FINAL_TRY 0x00200000
#define CRYPTO_EXTF_SO_PIN_LOCKED 0x00400000
#define CRYPTO_EXTF_SO_PIN_TO_BE_CHANGED 0x00800000
/*
 * The crypto_control_ops structure contains pointers to control
 * operations for cryptographic providers. It is passed through
 * the crypto_ops(9S) structure when providers register with the
 * kernel using crypto_register_provider(9F).
 */
typedef struct crypto_control_ops {
	void (*provider_status)(crypto_provider_handle_t, uint_t *);
} crypto_control_ops_t;
/*
 * The crypto_ctx_ops structure contains pointers to context and context
 * template management operations for cryptographic providers. It is
 * passed through the crypto_ops(9S) structure when providers register
 * with the kernel using crypto_register_provider(9F).
 */
typedef struct crypto_ctx_ops {
	int (*create_ctx_template)(crypto_provider_handle_t,
	    crypto_mechanism_t *, crypto_key_t *,
	    crypto_spi_ctx_template_t *, size_t *, crypto_req_handle_t);
	int (*free_context)(crypto_ctx_t *);
} crypto_ctx_ops_t;
/*
 * The crypto_digest_ops structure contains pointers to digest
 * operations for cryptographic providers. It is passed through
 * the crypto_ops(9S) structure when providers register with the
 * kernel using crypto_register_provider(9F).
 */
typedef struct crypto_digest_ops {
	int (*digest_init)(crypto_ctx_t *, crypto_mechanism_t *,
	    crypto_req_handle_t);
	int (*digest)(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
	    crypto_req_handle_t);
	int (*digest_update)(crypto_ctx_t *, crypto_data_t *,
	    crypto_req_handle_t);
	int (*digest_key)(crypto_ctx_t *, crypto_key_t *, crypto_req_handle_t);
	int (*digest_final)(crypto_ctx_t *, crypto_data_t *,
	    crypto_req_handle_t);
	int (*digest_atomic)(crypto_provider_handle_t, crypto_session_id_t,
	    crypto_mechanism_t *, crypto_data_t *,
	    crypto_data_t *, crypto_req_handle_t);
} crypto_digest_ops_t;
/*
 * The crypto_cipher_ops structure contains pointers to encryption
 * and decryption operations for cryptographic providers. It is
 * passed through the crypto_ops(9S) structure when providers register
 * with the kernel using crypto_register_provider(9F).
 */
typedef struct crypto_cipher_ops {
	int (*encrypt_init)(crypto_ctx_t *,
	    crypto_mechanism_t *, crypto_key_t *,
	    crypto_spi_ctx_template_t, crypto_req_handle_t);
	int (*encrypt)(crypto_ctx_t *,
	    crypto_data_t *, crypto_data_t *, crypto_req_handle_t);
	int (*encrypt_update)(crypto_ctx_t *,
	    crypto_data_t *, crypto_data_t *, crypto_req_handle_t);
	int (*encrypt_final)(crypto_ctx_t *,
	    crypto_data_t *, crypto_req_handle_t);
	/* single-shot: init + update + final in one provider call */
	int (*encrypt_atomic)(crypto_provider_handle_t, crypto_session_id_t,
	    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
	    crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
	int (*decrypt_init)(crypto_ctx_t *,
	    crypto_mechanism_t *, crypto_key_t *,
	    crypto_spi_ctx_template_t, crypto_req_handle_t);
	int (*decrypt)(crypto_ctx_t *,
	    crypto_data_t *, crypto_data_t *, crypto_req_handle_t);
	int (*decrypt_update)(crypto_ctx_t *,
	    crypto_data_t *, crypto_data_t *, crypto_req_handle_t);
	int (*decrypt_final)(crypto_ctx_t *,
	    crypto_data_t *, crypto_req_handle_t);
	int (*decrypt_atomic)(crypto_provider_handle_t, crypto_session_id_t,
	    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
	    crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
} crypto_cipher_ops_t;
/*
 * The crypto_mac_ops structure contains pointers to MAC
 * operations for cryptographic providers. It is passed through
 * the crypto_ops(9S) structure when providers register with the
 * kernel using crypto_register_provider(9F).
 */
typedef struct crypto_mac_ops {
	int (*mac_init)(crypto_ctx_t *,
	    crypto_mechanism_t *, crypto_key_t *,
	    crypto_spi_ctx_template_t, crypto_req_handle_t);
	int (*mac)(crypto_ctx_t *,
	    crypto_data_t *, crypto_data_t *, crypto_req_handle_t);
	int (*mac_update)(crypto_ctx_t *,
	    crypto_data_t *, crypto_req_handle_t);
	int (*mac_final)(crypto_ctx_t *,
	    crypto_data_t *, crypto_req_handle_t);
	int (*mac_atomic)(crypto_provider_handle_t, crypto_session_id_t,
	    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
	    crypto_data_t *, crypto_spi_ctx_template_t,
	    crypto_req_handle_t);
	int (*mac_verify_atomic)(crypto_provider_handle_t, crypto_session_id_t,
	    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
	    crypto_data_t *, crypto_spi_ctx_template_t,
	    crypto_req_handle_t);
} crypto_mac_ops_t;
/*
 * The crypto_sign_ops structure contains pointers to signing
 * operations for cryptographic providers. It is passed through
 * the crypto_ops(9S) structure when providers register with the
 * kernel using crypto_register_provider(9F).
 */
typedef struct crypto_sign_ops {
	int (*sign_init)(crypto_ctx_t *,
	    crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t,
	    crypto_req_handle_t);
	int (*sign)(crypto_ctx_t *,
	    crypto_data_t *, crypto_data_t *, crypto_req_handle_t);
	int (*sign_update)(crypto_ctx_t *,
	    crypto_data_t *, crypto_req_handle_t);
	int (*sign_final)(crypto_ctx_t *,
	    crypto_data_t *, crypto_req_handle_t);
	int (*sign_atomic)(crypto_provider_handle_t, crypto_session_id_t,
	    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
	    crypto_data_t *, crypto_spi_ctx_template_t,
	    crypto_req_handle_t);
	int (*sign_recover_init)(crypto_ctx_t *, crypto_mechanism_t *,
	    crypto_key_t *, crypto_spi_ctx_template_t,
	    crypto_req_handle_t);
	int (*sign_recover)(crypto_ctx_t *,
	    crypto_data_t *, crypto_data_t *, crypto_req_handle_t);
	int (*sign_recover_atomic)(crypto_provider_handle_t,
	    crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *,
	    crypto_data_t *, crypto_data_t *, crypto_spi_ctx_template_t,
	    crypto_req_handle_t);
} crypto_sign_ops_t;
/*
 * The crypto_verify_ops structure contains pointers to verify
 * operations for cryptographic providers. It is passed through
 * the crypto_ops(9S) structure when providers register with the
 * kernel using crypto_register_provider(9F).
 */
typedef struct crypto_verify_ops {
	int (*verify_init)(crypto_ctx_t *,
	    crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t,
	    crypto_req_handle_t);
	int (*do_verify)(crypto_ctx_t *,
	    crypto_data_t *, crypto_data_t *, crypto_req_handle_t);
	int (*verify_update)(crypto_ctx_t *,
	    crypto_data_t *, crypto_req_handle_t);
	int (*verify_final)(crypto_ctx_t *,
	    crypto_data_t *, crypto_req_handle_t);
	int (*verify_atomic)(crypto_provider_handle_t, crypto_session_id_t,
	    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
	    crypto_data_t *, crypto_spi_ctx_template_t,
	    crypto_req_handle_t);
	int (*verify_recover_init)(crypto_ctx_t *, crypto_mechanism_t *,
	    crypto_key_t *, crypto_spi_ctx_template_t,
	    crypto_req_handle_t);
	int (*verify_recover)(crypto_ctx_t *,
	    crypto_data_t *, crypto_data_t *, crypto_req_handle_t);
	int (*verify_recover_atomic)(crypto_provider_handle_t,
	    crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *,
	    crypto_data_t *, crypto_data_t *, crypto_spi_ctx_template_t,
	    crypto_req_handle_t);
} crypto_verify_ops_t;
/*
 * The crypto_dual_ops structure contains pointers to dual
 * cipher and sign/verify operations for cryptographic providers.
 * It is passed through the crypto_ops(9S) structure when
 * providers register with the kernel using
 * crypto_register_provider(9F).
 */
typedef struct crypto_dual_ops {
	int (*digest_encrypt_update)(
	    crypto_ctx_t *, crypto_ctx_t *, crypto_data_t *,
	    crypto_data_t *, crypto_req_handle_t);
	int (*decrypt_digest_update)(
	    crypto_ctx_t *, crypto_ctx_t *, crypto_data_t *,
	    crypto_data_t *, crypto_req_handle_t);
	int (*sign_encrypt_update)(
	    crypto_ctx_t *, crypto_ctx_t *, crypto_data_t *,
	    crypto_data_t *, crypto_req_handle_t);
	int (*decrypt_verify_update)(
	    crypto_ctx_t *, crypto_ctx_t *, crypto_data_t *,
	    crypto_data_t *, crypto_req_handle_t);
} crypto_dual_ops_t;
/*
 * The crypto_dual_cipher_mac_ops structure contains pointers to dual
 * cipher and MAC operations for cryptographic providers.
 * It is passed through the crypto_ops(9S) structure when
 * providers register with the kernel using
 * crypto_register_provider(9F).
 */
typedef struct crypto_dual_cipher_mac_ops {
	int (*encrypt_mac_init)(crypto_ctx_t *,
	    crypto_mechanism_t *, crypto_key_t *, crypto_mechanism_t *,
	    crypto_key_t *, crypto_spi_ctx_template_t,
	    crypto_spi_ctx_template_t, crypto_req_handle_t);
	int (*encrypt_mac)(crypto_ctx_t *,
	    crypto_data_t *, crypto_dual_data_t *, crypto_data_t *,
	    crypto_req_handle_t);
	int (*encrypt_mac_update)(crypto_ctx_t *,
	    crypto_data_t *, crypto_dual_data_t *, crypto_req_handle_t);
	int (*encrypt_mac_final)(crypto_ctx_t *,
	    crypto_dual_data_t *, crypto_data_t *, crypto_req_handle_t);
	int (*encrypt_mac_atomic)(crypto_provider_handle_t, crypto_session_id_t,
	    crypto_mechanism_t *, crypto_key_t *, crypto_mechanism_t *,
	    crypto_key_t *, crypto_data_t *, crypto_dual_data_t *,
	    crypto_data_t *, crypto_spi_ctx_template_t,
	    crypto_spi_ctx_template_t, crypto_req_handle_t);
	int (*mac_decrypt_init)(crypto_ctx_t *,
	    crypto_mechanism_t *, crypto_key_t *, crypto_mechanism_t *,
	    crypto_key_t *, crypto_spi_ctx_template_t,
	    crypto_spi_ctx_template_t, crypto_req_handle_t);
	int (*mac_decrypt)(crypto_ctx_t *,
	    crypto_dual_data_t *, crypto_data_t *, crypto_data_t *,
	    crypto_req_handle_t);
	int (*mac_decrypt_update)(crypto_ctx_t *,
	    crypto_dual_data_t *, crypto_data_t *, crypto_req_handle_t);
	int (*mac_decrypt_final)(crypto_ctx_t *,
	    crypto_data_t *, crypto_data_t *, crypto_req_handle_t);
	int (*mac_decrypt_atomic)(crypto_provider_handle_t,
	    crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *,
	    crypto_mechanism_t *, crypto_key_t *, crypto_dual_data_t *,
	    crypto_data_t *, crypto_data_t *, crypto_spi_ctx_template_t,
	    crypto_spi_ctx_template_t, crypto_req_handle_t);
	int (*mac_verify_decrypt_atomic)(crypto_provider_handle_t,
	    crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *,
	    crypto_mechanism_t *, crypto_key_t *, crypto_dual_data_t *,
	    crypto_data_t *, crypto_data_t *, crypto_spi_ctx_template_t,
	    crypto_spi_ctx_template_t, crypto_req_handle_t);
} crypto_dual_cipher_mac_ops_t;
/*
* The crypto_random_number_ops structure contains pointers to random
* number operations for cryptographic providers. It is passed through
* the crypto_ops(9S) structure when providers register with the
* kernel using crypto_register_provider(9F).
*/
typedef struct crypto_random_number_ops {
int (*seed_random)(crypto_provider_handle_t, crypto_session_id_t,
uchar_t *, size_t, uint_t, uint32_t, crypto_req_handle_t);
int (*generate_random)(crypto_provider_handle_t, crypto_session_id_t,
uchar_t *, size_t, crypto_req_handle_t);
} crypto_random_number_ops_t;
/*
* Flag values for seed_random.
*/
#define CRYPTO_SEED_NOW 0x00000001
/*
* The crypto_session_ops structure contains pointers to session
* operations for cryptographic providers. It is passed through
* the crypto_ops(9S) structure when providers register with the
* kernel using crypto_register_provider(9F).
*/
typedef struct crypto_session_ops {
int (*session_open)(crypto_provider_handle_t, crypto_session_id_t *,
crypto_req_handle_t);
int (*session_close)(crypto_provider_handle_t, crypto_session_id_t,
crypto_req_handle_t);
int (*session_login)(crypto_provider_handle_t, crypto_session_id_t,
crypto_user_type_t, char *, size_t, crypto_req_handle_t);
int (*session_logout)(crypto_provider_handle_t, crypto_session_id_t,
crypto_req_handle_t);
} crypto_session_ops_t;
/*
* The crypto_object_ops structure contains pointers to object
* operations for cryptographic providers. It is passed through
* the crypto_ops(9S) structure when providers register with the
* kernel using crypto_register_provider(9F).
*/
typedef struct crypto_object_ops {
	/* Create an object from an attribute template; id returned via arg 5. */
	int (*object_create)(crypto_provider_handle_t, crypto_session_id_t,
	    crypto_object_attribute_t *, uint_t, crypto_object_id_t *,
	    crypto_req_handle_t);
	/* Copy an existing object, applying the given attribute template. */
	int (*object_copy)(crypto_provider_handle_t, crypto_session_id_t,
	    crypto_object_id_t, crypto_object_attribute_t *, uint_t,
	    crypto_object_id_t *, crypto_req_handle_t);
	/* Destroy an object. */
	int (*object_destroy)(crypto_provider_handle_t, crypto_session_id_t,
	    crypto_object_id_t, crypto_req_handle_t);
	/* Return the size of an object via the size_t pointer. */
	int (*object_get_size)(crypto_provider_handle_t, crypto_session_id_t,
	    crypto_object_id_t, size_t *, crypto_req_handle_t);
	/* Read attribute values of an object into the supplied template. */
	int (*object_get_attribute_value)(crypto_provider_handle_t,
	    crypto_session_id_t, crypto_object_id_t,
	    crypto_object_attribute_t *, uint_t, crypto_req_handle_t);
	/* Set attribute values of an object from the supplied template. */
	int (*object_set_attribute_value)(crypto_provider_handle_t,
	    crypto_session_id_t, crypto_object_id_t,
	    crypto_object_attribute_t *, uint_t, crypto_req_handle_t);
	/*
	 * Begin a search for objects matching the attribute template;
	 * provider-private search state is returned via the void **.
	 */
	int (*object_find_init)(crypto_provider_handle_t, crypto_session_id_t,
	    crypto_object_attribute_t *, uint_t, void **,
	    crypto_req_handle_t);
	/*
	 * Continue a search: fill in matching object ids (array and its
	 * capacity are args 3/4) and return the count via the uint_t *.
	 */
	int (*object_find)(crypto_provider_handle_t, void *,
	    crypto_object_id_t *, uint_t, uint_t *, crypto_req_handle_t);
	/* Terminate a search and release the search state. */
	int (*object_find_final)(crypto_provider_handle_t, void *,
	    crypto_req_handle_t);
} crypto_object_ops_t;
/*
* The crypto_key_ops structure contains pointers to key
* operations for cryptographic providers. It is passed through
* the crypto_ops(9S) structure when providers register with the
* kernel using crypto_register_provider(9F).
*/
typedef struct crypto_key_ops {
	/* Generate a key from the template; object id returned via arg 6. */
	int (*key_generate)(crypto_provider_handle_t, crypto_session_id_t,
	    crypto_mechanism_t *, crypto_object_attribute_t *, uint_t,
	    crypto_object_id_t *, crypto_req_handle_t);
	/*
	 * Generate a key pair from the two attribute templates; the two
	 * resulting object ids are returned via args 8 and 9.
	 */
	int (*key_generate_pair)(crypto_provider_handle_t, crypto_session_id_t,
	    crypto_mechanism_t *, crypto_object_attribute_t *, uint_t,
	    crypto_object_attribute_t *, uint_t, crypto_object_id_t *,
	    crypto_object_id_t *, crypto_req_handle_t);
	/*
	 * Wrap the key identified by the object id using the wrapping key;
	 * wrapped bytes and length returned via the uchar_t and size_t
	 * pointers.
	 */
	int (*key_wrap)(crypto_provider_handle_t, crypto_session_id_t,
	    crypto_mechanism_t *, crypto_key_t *, crypto_object_id_t *,
	    uchar_t *, size_t *, crypto_req_handle_t);
	/* Unwrap wrapped key bytes into a new object built from the template. */
	int (*key_unwrap)(crypto_provider_handle_t, crypto_session_id_t,
	    crypto_mechanism_t *, crypto_key_t *, uchar_t *, size_t *,
	    crypto_object_attribute_t *, uint_t,
	    crypto_object_id_t *, crypto_req_handle_t);
	/* Derive a new key object from a base key and attribute template. */
	int (*key_derive)(crypto_provider_handle_t, crypto_session_id_t,
	    crypto_mechanism_t *, crypto_key_t *, crypto_object_attribute_t *,
	    uint_t, crypto_object_id_t *, crypto_req_handle_t);
	/* Check that a key is usable with the given mechanism. */
	int (*key_check)(crypto_provider_handle_t, crypto_mechanism_t *,
	    crypto_key_t *);
} crypto_key_ops_t;
/*
* The crypto_provider_management_ops structure contains pointers
* to management operations for cryptographic providers. It is passed
* through the crypto_ops(9S) structure when providers register with the
* kernel using crypto_register_provider(9F).
*/
typedef struct crypto_provider_management_ops {
	/* Return extended provider information (crypto_provider_ext_info_t). */
	int (*ext_info)(crypto_provider_handle_t,
	    crypto_provider_ext_info_t *, crypto_req_handle_t);
	/*
	 * Initialize a token.  The (char *, size_t) pair is presumably the
	 * PIN and the trailing char * a label, mirroring PKCS#11
	 * C_InitToken -- NOTE(review): confirm against callers.
	 */
	int (*init_token)(crypto_provider_handle_t, char *, size_t,
	    char *, crypto_req_handle_t);
	/* Initialize the user PIN for a session (cf. PKCS#11 C_InitPIN). */
	int (*init_pin)(crypto_provider_handle_t, crypto_session_id_t,
	    char *, size_t, crypto_req_handle_t);
	/* Change the PIN: old (char *, size_t) followed by new (char *, size_t). */
	int (*set_pin)(crypto_provider_handle_t, crypto_session_id_t,
	    char *, size_t, char *, size_t, crypto_req_handle_t);
} crypto_provider_management_ops_t;
/*
 * The crypto_mech_ops structure contains pointers to mechanism
 * parameter marshalling operations.  Judging by the names,
 * copyin/copyout move mechanism parameters between address spaces and
 * free_mechanism releases a copied mechanism; the int * and trailing
 * int arguments look like an out-error value and a mode flag --
 * NOTE(review): confirm against the framework callers (not visible here).
 */
typedef struct crypto_mech_ops {
	int (*copyin_mechanism)(crypto_provider_handle_t,
	    crypto_mechanism_t *, crypto_mechanism_t *, int *, int);
	int (*copyout_mechanism)(crypto_provider_handle_t,
	    crypto_mechanism_t *, crypto_mechanism_t *, int *, int);
	int (*free_mechanism)(crypto_provider_handle_t, crypto_mechanism_t *);
} crypto_mech_ops_t;
/*
 * The crypto_nostore_key_ops structure contains pointers to key
 * operations for providers that do not store keys as objects: unlike
 * the crypto_key_ops entry points above, these return output attribute
 * templates (crypto_object_attribute_t pointer plus uint_t count)
 * instead of crypto_object_id_t object handles.
 */
typedef struct crypto_nostore_key_ops {
	/* Generate a key; resulting attributes returned via args 6/7. */
	int (*nostore_key_generate)(crypto_provider_handle_t,
	    crypto_session_id_t, crypto_mechanism_t *,
	    crypto_object_attribute_t *, uint_t, crypto_object_attribute_t *,
	    uint_t, crypto_req_handle_t);
	/*
	 * Generate a key pair from two input templates (args 4-7);
	 * resulting attributes returned via the two output templates
	 * (args 8-11).
	 */
	int (*nostore_key_generate_pair)(crypto_provider_handle_t,
	    crypto_session_id_t, crypto_mechanism_t *,
	    crypto_object_attribute_t *, uint_t, crypto_object_attribute_t *,
	    uint_t, crypto_object_attribute_t *, uint_t,
	    crypto_object_attribute_t *, uint_t, crypto_req_handle_t);
	/* Derive a key from a base key; attributes returned via args 7/8. */
	int (*nostore_key_derive)(crypto_provider_handle_t, crypto_session_id_t,
	    crypto_mechanism_t *, crypto_key_t *, crypto_object_attribute_t *,
	    uint_t, crypto_object_attribute_t *, uint_t, crypto_req_handle_t);
} crypto_nostore_key_ops_t;
/*
* The crypto_ops(9S) structure contains the structures containing
* the pointers to functions implemented by cryptographic providers.
* It is specified as part of the crypto_provider_info(9S)
* supplied by a provider when it registers with the kernel
* by calling crypto_register_provider(9F).
*/
/*
 * Version 1 of the ops vector.  The layout is ABI: crypto_ops_v2_t
 * below embeds this struct as its first member, so members must not be
 * reordered or removed -- new entry points go into a new version.
 */
typedef struct crypto_ops_v1 {
	crypto_control_ops_t			*co_control_ops;
	crypto_digest_ops_t			*co_digest_ops;
	crypto_cipher_ops_t			*co_cipher_ops;
	crypto_mac_ops_t			*co_mac_ops;
	crypto_sign_ops_t			*co_sign_ops;
	crypto_verify_ops_t			*co_verify_ops;
	crypto_dual_ops_t			*co_dual_ops;
	crypto_dual_cipher_mac_ops_t		*co_dual_cipher_mac_ops;
	crypto_random_number_ops_t		*co_random_ops;
	crypto_session_ops_t			*co_session_ops;
	crypto_object_ops_t			*co_object_ops;
	crypto_key_ops_t			*co_key_ops;
	crypto_provider_management_ops_t	*co_provider_ops;
	crypto_ctx_ops_t			*co_ctx_ops;
} crypto_ops_v1_t;
/* Version 2 extends v1 with the mechanism marshalling ops. */
typedef struct crypto_ops_v2 {
	crypto_ops_v1_t		v1_ops;		/* must remain first (ABI) */
	crypto_mech_ops_t	*co_mech_ops;
} crypto_ops_v2_t;
/* Version 3 extends v2 with the no-store key ops. */
typedef struct crypto_ops_v3 {
	crypto_ops_v2_t			v2_ops;	/* must remain first (ABI) */
	crypto_nostore_key_ops_t	*co_nostore_key_ops;
} crypto_ops_v3_t;
/*
 * The ops vector handed to the framework: a union of all versions.
 * Because each vN embeds v(N-1) as its first member, the cou_v1 view
 * is valid regardless of which version the provider filled in.
 */
typedef struct crypto_ops {
	union {
		crypto_ops_v3_t	cou_v3;
		crypto_ops_v2_t	cou_v2;
		crypto_ops_v1_t	cou_v1;
	} cou;
} crypto_ops_t;
/*
 * Accessor macros: let code reference the co_xxx members of a
 * crypto_ops_t directly, hiding the version nesting (cou.cou_vN) above.
 */
#define	co_control_ops			cou.cou_v1.co_control_ops
#define	co_digest_ops			cou.cou_v1.co_digest_ops
#define	co_cipher_ops			cou.cou_v1.co_cipher_ops
#define	co_mac_ops			cou.cou_v1.co_mac_ops
#define	co_sign_ops			cou.cou_v1.co_sign_ops
#define	co_verify_ops			cou.cou_v1.co_verify_ops
#define	co_dual_ops			cou.cou_v1.co_dual_ops
#define	co_dual_cipher_mac_ops		cou.cou_v1.co_dual_cipher_mac_ops
#define	co_random_ops			cou.cou_v1.co_random_ops
#define	co_session_ops			cou.cou_v1.co_session_ops
#define	co_object_ops			cou.cou_v1.co_object_ops
#define	co_key_ops			cou.cou_v1.co_key_ops
#define	co_provider_ops			cou.cou_v1.co_provider_ops
#define	co_ctx_ops			cou.cou_v1.co_ctx_ops
#define	co_mech_ops			cou.cou_v2.co_mech_ops
#define	co_nostore_key_ops		cou.cou_v3.co_nostore_key_ops
/*
* The mechanism info structure crypto_mech_info_t contains a function group
* bit mask cm_func_group_mask. This field, of type crypto_func_group_t,
 * specifies the provider entry point that can be used for a particular
* mechanism. The function group mask is a combination of the following values.
*/
typedef uint32_t crypto_func_group_t;	/* bit mask of the CRYPTO_FG_* values below */
#define	CRYPTO_FG_ENCRYPT		0x00000001 /* encrypt_init() */
#define	CRYPTO_FG_DECRYPT		0x00000002 /* decrypt_init() */
#define	CRYPTO_FG_DIGEST		0x00000004 /* digest_init() */
#define	CRYPTO_FG_SIGN			0x00000008 /* sign_init() */
#define	CRYPTO_FG_SIGN_RECOVER		0x00000010 /* sign_recover_init() */
#define	CRYPTO_FG_VERIFY		0x00000020 /* verify_init() */
#define	CRYPTO_FG_VERIFY_RECOVER	0x00000040 /* verify_recover_init() */
#define	CRYPTO_FG_GENERATE		0x00000080 /* key_generate() */
#define	CRYPTO_FG_GENERATE_KEY_PAIR	0x00000100 /* key_generate_pair() */
#define	CRYPTO_FG_WRAP			0x00000200 /* key_wrap() */
#define	CRYPTO_FG_UNWRAP		0x00000400 /* key_unwrap() */
#define	CRYPTO_FG_DERIVE		0x00000800 /* key_derive() */
#define	CRYPTO_FG_MAC			0x00001000 /* mac_init() */
#define	CRYPTO_FG_ENCRYPT_MAC		0x00002000 /* encrypt_mac_init() */
#define	CRYPTO_FG_MAC_DECRYPT		0x00004000 /* decrypt_mac_init() */
#define	CRYPTO_FG_ENCRYPT_ATOMIC	0x00008000 /* encrypt_atomic() */
#define	CRYPTO_FG_DECRYPT_ATOMIC	0x00010000 /* decrypt_atomic() */
#define	CRYPTO_FG_MAC_ATOMIC		0x00020000 /* mac_atomic() */
#define	CRYPTO_FG_DIGEST_ATOMIC		0x00040000 /* digest_atomic() */
#define	CRYPTO_FG_SIGN_ATOMIC		0x00080000 /* sign_atomic() */
#define	CRYPTO_FG_SIGN_RECOVER_ATOMIC   0x00100000 /* sign_recover_atomic() */
#define	CRYPTO_FG_VERIFY_ATOMIC		0x00200000 /* verify_atomic() */
#define	CRYPTO_FG_VERIFY_RECOVER_ATOMIC	0x00400000 /* verify_recover_atomic() */
#define	CRYPTO_FG_ENCRYPT_MAC_ATOMIC	0x00800000 /* encrypt_mac_atomic() */
#define	CRYPTO_FG_MAC_DECRYPT_ATOMIC	0x01000000 /* mac_decrypt_atomic() */
#define	CRYPTO_FG_RESERVED		0x80000000
/*
* Maximum length of the pi_provider_description field of the
* crypto_provider_info structure.
*/
#define CRYPTO_PROVIDER_DESCR_MAX_LEN 64
/* Bit mask for all the simple operations */
#define CRYPTO_FG_SIMPLEOP_MASK (CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT | \
CRYPTO_FG_DIGEST | CRYPTO_FG_SIGN | CRYPTO_FG_VERIFY | CRYPTO_FG_MAC | \
CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC | \
CRYPTO_FG_MAC_ATOMIC | CRYPTO_FG_DIGEST_ATOMIC | CRYPTO_FG_SIGN_ATOMIC | \
CRYPTO_FG_VERIFY_ATOMIC)
/* Bit mask for all the dual operations */
#define CRYPTO_FG_MAC_CIPHER_MASK (CRYPTO_FG_ENCRYPT_MAC | \
CRYPTO_FG_MAC_DECRYPT | CRYPTO_FG_ENCRYPT_MAC_ATOMIC | \
CRYPTO_FG_MAC_DECRYPT_ATOMIC)
/* Add other combos to CRYPTO_FG_DUAL_MASK */
#define CRYPTO_FG_DUAL_MASK CRYPTO_FG_MAC_CIPHER_MASK
/*
* The crypto_mech_info structure specifies one of the mechanisms
* supported by a cryptographic provider. The pi_mechanisms field of
* the crypto_provider_info structure contains a pointer to an array
* of crypto_mech_info's.
*/
typedef struct crypto_mech_info {
	crypto_mech_name_t	cm_mech_name;	/* textual mechanism name */
	crypto_mech_type_t	cm_mech_number;	/* numeric mechanism id */
	/* CRYPTO_FG_* bits: which entry points support this mechanism */
	crypto_func_group_t	cm_func_group_mask;
	ssize_t			cm_min_key_length; /* minimum key length */
	/* max key length; doubles as max input length for CRYPTO_HASH_NO_UPDATE
	 * providers (see the cm_max_input_length alias below) */
	ssize_t			cm_max_key_length;
	uint32_t		cm_mech_flags;	/* also aliased as cm_keysize_unit */
} crypto_mech_info_t;
/* Alias the old name to the new name for compatibility. */
#define cm_keysize_unit cm_mech_flags
/*
* The following is used by a provider that sets
* CRYPTO_HASH_NO_UPDATE. It needs to specify the maximum
* input data size it can digest in this field.
*/
#define cm_max_input_length cm_max_key_length
/*
* crypto_kcf_provider_handle_t is a handle allocated by the kernel.
* It is returned after the provider registers with
* crypto_register_provider(), and must be specified by the provider
* when calling crypto_unregister_provider(), and
* crypto_provider_notification().
*/
typedef uint_t crypto_kcf_provider_handle_t;
/*
* Provider information. Passed as argument to crypto_register_provider(9F).
* Describes the provider and its capabilities. Multiple providers can
* register for the same device instance. In this case, the same
* pi_provider_dev must be specified with a different pi_provider_handle.
*/
typedef struct crypto_provider_info_v1 {
	uint_t			pi_interface_version;	/* SPI version implemented */
	/* free-form description; max length CRYPTO_PROVIDER_DESCR_MAX_LEN */
	char			*pi_provider_description;
	crypto_provider_type_t	pi_provider_type;
	/* provider-private handle, passed back to every entry point */
	crypto_provider_handle_t pi_provider_handle;
	crypto_ops_t		*pi_ops_vector;		/* see crypto_ops_t above */
	uint_t			pi_mech_list_count;	/* entries in pi_mechanisms */
	crypto_mech_info_t	*pi_mechanisms;		/* supported mechanisms */
	/* entries in pi_logical_providers */
	uint_t			pi_logical_provider_count;
	/* associated logical providers (see CRYPTO_HIDE_PROVIDER below) */
	crypto_kcf_provider_handle_t *pi_logical_providers;
} crypto_provider_info_v1_t;
/* Version 2 adds pi_flags (CRYPTO_HIDE_PROVIDER etc., defined below). */
typedef struct crypto_provider_info_v2 {
	crypto_provider_info_v1_t	v1_info;	/* must remain first (ABI) */
	uint_t				pi_flags;
} crypto_provider_info_v2_t;
/*
 * Versioned union, like crypto_ops_t: v2 embeds v1 as its first
 * member, so the piu_v1 view is always valid.
 */
typedef struct crypto_provider_info {
	union {
		crypto_provider_info_v2_t	piu_v2;
		crypto_provider_info_v1_t	piu_v1;
	} piu;
} crypto_provider_info_t;
#define pi_interface_version piu.piu_v1.pi_interface_version
#define pi_provider_description piu.piu_v1.pi_provider_description
#define pi_provider_type piu.piu_v1.pi_provider_type
#define pi_provider_handle piu.piu_v1.pi_provider_handle
#define pi_ops_vector piu.piu_v1.pi_ops_vector
#define pi_mech_list_count piu.piu_v1.pi_mech_list_count
#define pi_mechanisms piu.piu_v1.pi_mechanisms
#define pi_logical_provider_count piu.piu_v1.pi_logical_provider_count
#define pi_logical_providers piu.piu_v1.pi_logical_providers
#define pi_flags piu.piu_v2.pi_flags
/* hidden providers can only be accessed via a logical provider */
#define CRYPTO_HIDE_PROVIDER 0x00000001
/*
* provider can not do multi-part digest (updates) and has a limit
* on maximum input data that it can digest.
*/
#define CRYPTO_HASH_NO_UPDATE 0x00000002
/* provider can handle the request without returning a CRYPTO_QUEUED */
#define CRYPTO_SYNCHRONOUS 0x00000004
#define CRYPTO_PIFLAGS_RESERVED2 0x40000000
#define CRYPTO_PIFLAGS_RESERVED1 0x80000000
/*
* Provider status passed by a provider to crypto_provider_notification(9F)
 * and returned by the provider_status(9E) entry point.
*/
#define CRYPTO_PROVIDER_READY 0
#define CRYPTO_PROVIDER_BUSY 1
#define CRYPTO_PROVIDER_FAILED 2
/*
* Functions exported by Solaris to cryptographic providers. Providers
* call these functions to register and unregister, notify the kernel
 * of state changes, and notify the kernel when an asynchronous request
* completed.
*/
extern int crypto_register_provider(crypto_provider_info_t *,
crypto_kcf_provider_handle_t *);
extern int crypto_unregister_provider(crypto_kcf_provider_handle_t);
extern void crypto_provider_notification(crypto_kcf_provider_handle_t, uint_t);
extern void crypto_op_notification(crypto_req_handle_t, int);
extern int crypto_kmflag(crypto_req_handle_t);
#ifdef __cplusplus
}
#endif
#endif /* _SYS_CRYPTO_SPI_H */

View File

@ -0,0 +1,307 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _IA32_SYS_ASM_LINKAGE_H
#define _IA32_SYS_ASM_LINKAGE_H
#include <sys/stack.h>
#include <sys/trap.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifdef _ASM /* The remainder of this file is only for assembly files */
/*
* make annoying differences in assembler syntax go away
*/
/*
* D16 and A16 are used to insert instructions prefixes; the
* macros help the assembler code be slightly more portable.
*/
#if !defined(__GNUC_AS__)
/*
* /usr/ccs/bin/as prefixes are parsed as separate instructions
*/
#define D16 data16;
#define A16 addr16;
/*
* (There are some weird constructs in constant expressions)
*/
#define _CONST(const) [const]
#define _BITNOT(const) -1!_CONST(const)
#define _MUL(a, b) _CONST(a \* b)
#else
/*
* Why not use the 'data16' and 'addr16' prefixes .. well, the
* assembler doesn't quite believe in real mode, and thus argues with
* us about what we're trying to do.
*/
#define D16 .byte 0x66;
#define A16 .byte 0x67;
#define _CONST(const) (const)
#define _BITNOT(const) ~_CONST(const)
#define _MUL(a, b) _CONST(a * b)
#endif
/*
* C pointers are different sizes between i386 and amd64.
* These constants can be used to compute offsets into pointer arrays.
*/
#if defined(__amd64)
#define CLONGSHIFT 3
#define CLONGSIZE 8
#define CLONGMASK 7
#elif defined(__i386)
#define CLONGSHIFT 2
#define CLONGSIZE 4
#define CLONGMASK 3
#endif
/*
* Since we know we're either ILP32 or LP64 ..
*/
#define CPTRSHIFT CLONGSHIFT
#define CPTRSIZE CLONGSIZE
#define CPTRMASK CLONGMASK
#if CPTRSIZE != (1 << CPTRSHIFT) || CLONGSIZE != (1 << CLONGSHIFT)
#error "inconsistent shift constants"
#endif
#if CPTRMASK != (CPTRSIZE - 1) || CLONGMASK != (CLONGSIZE - 1)
#error "inconsistent mask constants"
#endif
#define ASM_ENTRY_ALIGN 16
/*
* SSE register alignment and save areas
*/
#define XMM_SIZE 16
#define XMM_ALIGN 16
#if defined(__amd64)
#define SAVE_XMM_PROLOG(sreg, nreg) \
subq $_CONST(_MUL(XMM_SIZE, nreg)), %rsp; \
movq %rsp, sreg
#define RSTOR_XMM_EPILOG(sreg, nreg) \
addq $_CONST(_MUL(XMM_SIZE, nreg)), %rsp
#elif defined(__i386)
#define SAVE_XMM_PROLOG(sreg, nreg) \
subl $_CONST(_MUL(XMM_SIZE, nreg) + XMM_ALIGN), %esp; \
movl %esp, sreg; \
addl $XMM_ALIGN, sreg; \
andl $_BITNOT(XMM_ALIGN-1), sreg
#define RSTOR_XMM_EPILOG(sreg, nreg) \
addl $_CONST(_MUL(XMM_SIZE, nreg) + XMM_ALIGN), %esp;
#endif /* __i386 */
/*
* profiling causes definitions of the MCOUNT and RTMCOUNT
* particular to the type
*/
#ifdef GPROF
#define MCOUNT(x) \
pushl %ebp; \
movl %esp, %ebp; \
call _mcount; \
popl %ebp
#endif /* GPROF */
#ifdef PROF
#define MCOUNT(x) \
/* CSTYLED */ \
.lcomm .L_/**/x/**/1, 4, 4; \
pushl %ebp; \
movl %esp, %ebp; \
/* CSTYLED */ \
movl $.L_/**/x/**/1, %edx; \
call _mcount; \
popl %ebp
#endif /* PROF */
/*
* if we are not profiling, MCOUNT should be defined to nothing
*/
#if !defined(PROF) && !defined(GPROF)
#define MCOUNT(x)
#endif /* !defined(PROF) && !defined(GPROF) */
#define RTMCOUNT(x) MCOUNT(x)
/*
* Macro to define weak symbol aliases. These are similar to the ANSI-C
* #pragma weak _name = name
* except a compiler can determine type. The assembler must be told. Hence,
* the second parameter must be the type of the symbol (i.e.: function,...)
*/
#define ANSI_PRAGMA_WEAK(sym, stype) \
/* CSTYLED */ \
.weak _/**/sym; \
/* CSTYLED */ \
.type _/**/sym, @stype; \
/* CSTYLED */ \
_/**/sym = sym
/*
* Like ANSI_PRAGMA_WEAK(), but for unrelated names, as in:
* #pragma weak sym1 = sym2
*/
#define ANSI_PRAGMA_WEAK2(sym1, sym2, stype) \
.weak sym1; \
.type sym1, @stype; \
sym1 = sym2
/*
* ENTRY provides the standard procedure entry code and an easy way to
* insert the calls to mcount for profiling. ENTRY_NP is identical, but
* never calls mcount.
*/
#define	ENTRY(x) \
	.text; \
	.align	ASM_ENTRY_ALIGN; \
	.globl	x; \
	.type	x, @function; \
x:	MCOUNT(x)
/* ENTRY_NP: like ENTRY, but never inserts a profiling MCOUNT call. */
#define	ENTRY_NP(x) \
	.text; \
	.align	ASM_ENTRY_ALIGN; \
	.globl	x; \
	.type	x, @function; \
x:
/* RTENTRY: like ENTRY, but uses RTMCOUNT (defined above as MCOUNT). */
#define	RTENTRY(x) \
	.text; \
	.align	ASM_ENTRY_ALIGN; \
	.globl	x; \
	.type	x, @function; \
x:	RTMCOUNT(x)
/*
* ENTRY2 is identical to ENTRY but provides two labels for the entry point.
*/
#define ENTRY2(x, y) \
.text; \
.align ASM_ENTRY_ALIGN; \
.globl x, y; \
.type x, @function; \
.type y, @function; \
/* CSTYLED */ \
x: ; \
y: MCOUNT(x)
#define ENTRY_NP2(x, y) \
.text; \
.align ASM_ENTRY_ALIGN; \
.globl x, y; \
.type x, @function; \
.type y, @function; \
/* CSTYLED */ \
x: ; \
y:
/*
* ALTENTRY provides for additional entry points.
*/
#define ALTENTRY(x) \
.globl x; \
.type x, @function; \
x:
/*
* DGDEF and DGDEF2 provide global data declarations.
*
* DGDEF provides a word aligned word of storage.
*
* DGDEF2 allocates "sz" bytes of storage with **NO** alignment. This
* implies this macro is best used for byte arrays.
*
* DGDEF3 allocates "sz" bytes of storage with "algn" alignment.
*/
#define DGDEF2(name, sz) \
.data; \
.globl name; \
.type name, @object; \
.size name, sz; \
name:
#define DGDEF3(name, sz, algn) \
.data; \
.align algn; \
.globl name; \
.type name, @object; \
.size name, sz; \
name:
#define DGDEF(name) DGDEF3(name, 4, 4)
/*
* SET_SIZE trails a function and set the size for the ELF symbol table.
*/
#define SET_SIZE(x) \
.size x, [.-x]
/*
* NWORD provides native word value.
*/
#if defined(__amd64)
/*CSTYLED*/
#define NWORD quad
#elif defined(__i386)
#define NWORD long
#endif /* __i386 */
#endif /* _ASM */
#ifdef __cplusplus
}
#endif
#endif /* _IA32_SYS_ASM_LINKAGE_H */

View File

@ -0,0 +1,160 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2004 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _IA32_SYS_STACK_H
#define _IA32_SYS_STACK_H
#if !defined(_ASM)
#include <sys/types.h>
#endif
#ifdef __cplusplus
extern "C" {
#endif
/*
* In the x86 world, a stack frame looks like this:
*
* |--------------------------|
* 4n+8(%ebp) ->| argument word n |
* | ... | (Previous frame)
* 8(%ebp) ->| argument word 0 |
* |--------------------------|--------------------
* 4(%ebp) ->| return address |
* |--------------------------|
* 0(%ebp) ->| previous %ebp (optional) |
* |--------------------------|
* -4(%ebp) ->| unspecified | (Current frame)
* | ... |
* 0(%esp) ->| variable size |
* |--------------------------|
*/
/*
* Stack alignment macros.
*/
#define STACK_ALIGN32 4
#define STACK_ENTRY_ALIGN32 4
#define STACK_BIAS32 0
#define SA32(x) (((x)+(STACK_ALIGN32-1)) & ~(STACK_ALIGN32-1))
#define STACK_RESERVE32 0
#define MINFRAME32 0
#if defined(__amd64)
/*
* In the amd64 world, a stack frame looks like this:
*
* |--------------------------|
* 8n+16(%rbp)->| argument word n |
* | ... | (Previous frame)
* 16(%rbp) ->| argument word 0 |
* |--------------------------|--------------------
* 8(%rbp) ->| return address |
* |--------------------------|
* 0(%rbp) ->| previous %rbp |
* |--------------------------|
* -8(%rbp) ->| unspecified | (Current frame)
* | ... |
* 0(%rsp) ->| variable size |
* |--------------------------|
* -128(%rsp) ->| reserved for function |
* |--------------------------|
*
* The end of the input argument area must be aligned on a 16-byte
* boundary; i.e. (%rsp - 8) % 16 == 0 at function entry.
*
* The 128-byte location beyond %rsp is considered to be reserved for
* functions and is NOT modified by signal handlers. It can be used
* to store temporary data that is not needed across function calls.
*/
/*
* Stack alignment macros.
*/
#define STACK_ALIGN64 16
#define STACK_ENTRY_ALIGN64 8
#define STACK_BIAS64 0
#define SA64(x) (((x)+(STACK_ALIGN64-1)) & ~(STACK_ALIGN64-1))
#define STACK_RESERVE64 128
#define MINFRAME64 0
#define STACK_ALIGN STACK_ALIGN64
#define STACK_ENTRY_ALIGN STACK_ENTRY_ALIGN64
#define STACK_BIAS STACK_BIAS64
#define SA(x) SA64(x)
#define STACK_RESERVE STACK_RESERVE64
#define MINFRAME MINFRAME64
#elif defined(__i386)
#define STACK_ALIGN STACK_ALIGN32
#define STACK_ENTRY_ALIGN STACK_ENTRY_ALIGN32
#define STACK_BIAS STACK_BIAS32
#define SA(x) SA32(x)
#define STACK_RESERVE STACK_RESERVE32
#define MINFRAME MINFRAME32
#endif /* __i386 */
#if defined(_KERNEL) && !defined(_ASM)
#if defined(DEBUG)
#if STACK_ALIGN == 4
/*
 * DEBUG-only runtime check that the stack pointer honors STACK_ALIGN:
 * take the address of a fresh local and test its low bits.
 */
#define	ASSERT_STACK_ALIGNED()						\
	{								\
		uint32_t	__tmp;					\
		ASSERT((((uintptr_t)&__tmp) & (STACK_ALIGN - 1)) == 0);	\
	}
#elif (STACK_ALIGN == 16) && (_LONG_DOUBLE_ALIGNMENT == 16)
/*
 * The guard above guarantees long double is 16-byte aligned here, so
 * the address of a long double local tests 16-byte stack alignment.
 */
#define	ASSERT_STACK_ALIGNED()						\
	{								\
		long double	__tmp;					\
		ASSERT((((uintptr_t)&__tmp) & (STACK_ALIGN - 1)) == 0);	\
	}
#endif
#else	/* DEBUG */
#define	ASSERT_STACK_ALIGNED()
#endif	/* DEBUG */
struct regs;
void traceregs(struct regs *);
void traceback(caddr_t);
#endif /* defined(_KERNEL) && !defined(_ASM) */
#define STACK_GROWTH_DOWN /* stacks grow from high to low addresses */
#ifdef __cplusplus
}
#endif
#endif /* _IA32_SYS_STACK_H */

View File

@ -0,0 +1,107 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/* Copyright (c) 1990, 1991 UNIX System Laboratories, Inc. */
/* Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T */
/* All Rights Reserved */
/*
* Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _IA32_SYS_TRAP_H
#define _IA32_SYS_TRAP_H
#ifdef __cplusplus
extern "C" {
#endif
/*
* Trap type values
*/
#define	T_ZERODIV	0x0	/* #de	divide by 0 error */
#define	T_SGLSTP	0x1	/* #db	single step */
#define	T_NMIFLT	0x2	/*	NMI */
#define	T_BPTFLT	0x3	/* #bp	breakpoint fault, INT3 insn */
#define	T_OVFLW		0x4	/* #of	INTO overflow fault */
#define	T_BOUNDFLT	0x5	/* #br	BOUND insn fault */
#define	T_ILLINST	0x6	/* #ud	invalid opcode fault */
#define	T_NOEXTFLT	0x7	/* #nm	device not available: x87 */
#define	T_DBLFLT	0x8	/* #df	double fault */
#define	T_EXTOVRFLT	0x9	/* [not generated: 386 only] */
#define	T_TSSFLT	0xa	/* #ts	invalid TSS fault */
#define	T_SEGFLT	0xb	/* #np	segment not present fault */
#define	T_STKFLT	0xc	/* #ss	stack fault */
#define	T_GPFLT		0xd	/* #gp	general protection fault */
#define	T_PGFLT		0xe	/* #pf	page fault */
#define	T_EXTERRFLT	0x10	/* #mf	x87 FPU error fault */
#define	T_ALIGNMENT	0x11	/* #ac	alignment check error */
#define	T_MCE		0x12	/* #mc	machine check exception */
#define	T_SIMDFPE	0x13	/* #xm	SSE/SSE2 exception */
#define	T_DBGENTR	0x14	/*	debugger entry */
#define	T_ENDPERR	0x21	/*	emulated extension error flt */
#define	T_ENOEXTFLT	0x20	/*	emulated ext not present */
#define	T_FASTTRAP	0xd2	/*	fast system call */
#define	T_SYSCALLINT	0x91	/*	general system call */
#define	T_DTRACE_RET	0x7f	/*	DTrace pid return */
#define	T_INT80		0x80	/*	int80 handler for linux emulation */
#define	T_SOFTINT	0x50fd	/*	pseudo softint trap type */
/*
* Pseudo traps.
*/
#define T_INTERRUPT 0x100
#define T_FAULT 0x200
#define T_AST 0x400
#define T_SYSCALL 0x180
/*
* Values of error code on stack in case of page fault
*/
#define PF_ERR_MASK 0x01 /* Mask for error bit */
#define PF_ERR_PAGE 0x00 /* page not present */
#define PF_ERR_PROT 0x01 /* protection error */
#define PF_ERR_WRITE 0x02 /* fault caused by write (else read) */
#define PF_ERR_USER 0x04 /* processor was in user mode */
/* (else supervisor) */
#define PF_ERR_EXEC 0x10 /* attempt to execute a No eXec page (AMD) */
/*
* Definitions for fast system call subfunctions
*/
#define T_FNULL 0 /* Null trap for testing */
#define T_FGETFP 1 /* Get emulated FP context */
#define T_FSETFP 2 /* Set emulated FP context */
#define T_GETHRTIME 3 /* Get high resolution time */
#define T_GETHRVTIME 4 /* Get high resolution virtual time */
#define T_GETHRESTIME 5 /* Get high resolution time */
#define T_GETLGRP 6 /* Get home lgrpid */
#define T_LASTFAST 6 /* Last valid subfunction */
#ifdef __cplusplus
}
#endif
#endif /* _IA32_SYS_TRAP_H */

View File

@ -0,0 +1,477 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _SYS_MODCTL_H
#define _SYS_MODCTL_H
/*
* loadable module support.
*/
#include <sys/zfs_context.h>
#ifdef __cplusplus
extern "C" {
#endif
struct modlmisc;
struct modlinkage;
/*
* The following structure defines the operations used by modctl
* to load and unload modules. Each supported loadable module type
* requires a set of mod_ops.
*/
struct mod_ops {
	/* Install (load) a module of this type. */
	int (*modm_install)(struct modlmisc *, struct modlinkage *);
	/* Remove (unload) a module of this type. */
	int (*modm_remove)(struct modlmisc *, struct modlinkage *);
	/* Return module information (cf. the MODINFO command) via the int *. */
	int (*modm_info)(void *, struct modlinkage *, int *);
};
/*
* The defined set of mod_ops structures for each loadable module type
* Defined in modctl.c
*/
extern struct mod_ops mod_brandops;
#if defined(__i386) || defined(__amd64)
extern struct mod_ops mod_cpuops;
#endif
extern struct mod_ops mod_cryptoops;
extern struct mod_ops mod_driverops;
extern struct mod_ops mod_execops;
extern struct mod_ops mod_fsops;
extern struct mod_ops mod_miscops;
extern struct mod_ops mod_schedops;
extern struct mod_ops mod_strmodops;
extern struct mod_ops mod_syscallops;
extern struct mod_ops mod_sockmodops;
#ifdef _SYSCALL32_IMPL
extern struct mod_ops mod_syscallops32;
#endif
extern struct mod_ops mod_dacfops;
extern struct mod_ops mod_ippops;
extern struct mod_ops mod_pcbeops;
extern struct mod_ops mod_devfsops;
extern struct mod_ops mod_kiconvops;
/*
* Definitions for the module specific linkage structures.
* The first two fields are the same in all of the structures.
* The linkinfo is for informational purposes only and is returned by
* modctl with the MODINFO cmd.
*/
/* For cryptographic providers */
struct modlcrypto {
	struct mod_ops	*crypto_modops;	/* ops vector (cf. mod_cryptoops above) */
	char		*crypto_linkinfo; /* informational string, returned by MODINFO */
};
/* For misc */
struct modlmisc {
	struct mod_ops	*misc_modops;	/* ops vector (cf. mod_miscops above) */
	char		*misc_linkinfo;	/* informational string, returned by MODINFO */
};
/*
* Revision number of loadable modules support. This is the value
* that must be used in the modlinkage structure.
*/
#define MODREV_1 1
/*
* The modlinkage structure is the structure that the module writer
* provides to the routines to install, remove, and stat a module.
* The ml_linkage element is an array of pointers to linkage structures.
* For most modules there is only one linkage structure. We allocate
* enough space for 3 linkage structures which happens to be the most
* we have in any sun supplied module. For those modules with more
* than 3 linkage structures (which is very unlikely), a modlinkage
* structure must be kmem_alloc'd in the module wrapper to be big enough
* for all of the linkage structures.
*/
struct modlinkage {
int ml_rev; /* rev of loadable modules system */
#ifdef _LP64
void *ml_linkage[7]; /* more space in 64-bit OS */
#else
void *ml_linkage[4]; /* NULL terminated list of */
/* linkage structures */
#endif
};
/*
* commands. These are the commands supported by the modctl system call.
*/
#define MODLOAD 0
#define MODUNLOAD 1
#define MODINFO 2
#define MODRESERVED 3
#define MODSETMINIROOT 4
#define MODADDMAJBIND 5
#define MODGETPATH 6
#define MODREADSYSBIND 7
#define MODGETMAJBIND 8
#define MODGETNAME 9
#define MODSIZEOF_DEVID 10
#define MODGETDEVID 11
#define MODSIZEOF_MINORNAME 12
#define MODGETMINORNAME 13
#define MODGETPATHLEN 14
#define MODEVENTS 15
#define MODGETFBNAME 16
#define MODREREADDACF 17
#define MODLOADDRVCONF 18
#define MODUNLOADDRVCONF 19
#define MODREMMAJBIND 20
#define MODDEVT2INSTANCE 21
#define MODGETDEVFSPATH_LEN 22
#define MODGETDEVFSPATH 23
#define MODDEVID2PATHS 24
#define MODSETDEVPOLICY 26
#define MODGETDEVPOLICY 27
#define MODALLOCPRIV 28
#define MODGETDEVPOLICYBYNAME 29
#define MODLOADMINORPERM 31
#define MODADDMINORPERM 32
#define MODREMMINORPERM 33
#define MODREMDRVCLEANUP 34
#define MODDEVEXISTS 35
#define MODDEVREADDIR 36
#define MODDEVNAME 37
#define MODGETDEVFSPATH_MI_LEN 38
#define MODGETDEVFSPATH_MI 39
#define MODRETIRE 40
#define MODUNRETIRE 41
#define MODISRETIRED 42
#define MODDEVEMPTYDIR 43
#define MODREMDRVALIAS 44
/*
* sub cmds for MODEVENTS
*/
#define MODEVENTS_FLUSH 0
#define MODEVENTS_FLUSH_DUMP 1
#define MODEVENTS_SET_DOOR_UPCALL_FILENAME 2
#define MODEVENTS_GETDATA 3
#define MODEVENTS_FREEDATA 4
#define MODEVENTS_POST_EVENT 5
#define MODEVENTS_REGISTER_EVENT 6
/*
* devname subcmds for MODDEVNAME
*/
#define MODDEVNAME_LOOKUPDOOR 0
#define MODDEVNAME_DEVFSADMNODE 1
#define MODDEVNAME_NSMAPS 2
#define MODDEVNAME_PROFILE 3
#define MODDEVNAME_RECONFIG 4
#define MODDEVNAME_SYSAVAIL 5
/*
* Data structure passed to modconfig command in kernel to build devfs tree
*/
struct aliases {
	struct aliases	*a_next;	/* next alias in the list */
	char		*a_name;	/* alias name */
	/* length of a_name -- NOTE(review): confirm whether NUL is counted */
	int		a_len;
};
#define	MAXMODCONFNAME	256	/* size of the name buffers below */
struct modconfig {
	char		drvname[MAXMODCONFNAME];	/* driver name */
	char		drvclass[MAXMODCONFNAME];	/* driver class name */
	int		major;		/* major device number */
	int		flags;		/* e.g. MOD_UNBIND_OVERRIDE (below) */
	int		num_aliases;	/* number of entries on the ap list */
	struct aliases	*ap;		/* list of aliases */
};
#if defined(_SYSCALL32)
struct aliases32 {
caddr32_t a_next;
caddr32_t a_name;
int32_t a_len;
};
struct modconfig32 {
char drvname[MAXMODCONFNAME];
char drvclass[MAXMODCONFNAME];
int32_t major;
int32_t flags;
int32_t num_aliases;
caddr32_t ap;
};
#endif /* _SYSCALL32 */
/* flags for modconfig */
#define MOD_UNBIND_OVERRIDE 0x01 /* fail unbind if in use */
/*
* Max module path length
*/
#define MOD_MAXPATH 256
/*
* Default search path for modules ADDITIONAL to the directory
* where the kernel components we booted from are.
*
* Most often, this will be "/platform/{platform}/kernel /kernel /usr/kernel",
* but we don't wire it down here.
*/
#define MOD_DEFPATH "/kernel /usr/kernel"
/*
* Default file name extension for autoloading modules.
*/
#define MOD_DEFEXT ""
/*
* Parameters for modinfo
*/
#define MODMAXNAMELEN 32 /* max module name length */
#define MODMAXLINKINFOLEN 32 /* max link info length */
/*
* Module specific information.
*/
struct modspecific_info {
char msi_linkinfo[MODMAXLINKINFOLEN]; /* name in linkage struct */
int msi_p0; /* module specific information */
};
/*
* Structure returned by modctl with MODINFO command.
*/
#define MODMAXLINK 10 /* max linkages modinfo can handle */
struct modinfo {
int mi_info; /* Flags for info wanted */
int mi_state; /* Flags for module state */
int mi_id; /* id of this loaded module */
int mi_nextid; /* id of next module or -1 */
caddr_t mi_base; /* virtual addr of text */
size_t mi_size; /* size of module in bytes */
int mi_rev; /* loadable modules rev */
int mi_loadcnt; /* # of times loaded */
char mi_name[MODMAXNAMELEN]; /* name of module */
struct modspecific_info mi_msinfo[MODMAXLINK];
/* mod specific info */
};
#if defined(_SYSCALL32)
#define MODMAXNAMELEN32 32 /* max module name length */
#define MODMAXLINKINFOLEN32 32 /* max link info length */
#define MODMAXLINK32 10 /* max linkages modinfo can handle */
struct modspecific_info32 {
char msi_linkinfo[MODMAXLINKINFOLEN32]; /* name in linkage struct */
int32_t msi_p0; /* module specific information */
};
struct modinfo32 {
int32_t mi_info; /* Flags for info wanted */
int32_t mi_state; /* Flags for module state */
int32_t mi_id; /* id of this loaded module */
int32_t mi_nextid; /* id of next module or -1 */
caddr32_t mi_base; /* virtual addr of text */
uint32_t mi_size; /* size of module in bytes */
int32_t mi_rev; /* loadable modules rev */
int32_t mi_loadcnt; /* # of times loaded */
char mi_name[MODMAXNAMELEN32]; /* name of module */
struct modspecific_info32 mi_msinfo[MODMAXLINK32];
/* mod specific info */
};
#endif /* _SYSCALL32 */
/* Values for mi_info flags */
#define MI_INFO_ONE 1
#define MI_INFO_ALL 2
#define MI_INFO_CNT 4
#define MI_INFO_LINKAGE 8 /* used internally to extract modlinkage */
/*
* MI_INFO_NOBASE indicates caller does not need mi_base. Failure to use this
* flag may lead 32-bit apps to receive an EOVERFLOW error from modctl(MODINFO)
* when used with a 64-bit kernel.
*/
#define MI_INFO_NOBASE 16
/* Values for mi_state */
#define MI_LOADED 1
#define MI_INSTALLED 2
/*
* Macros to vector to the appropriate module specific routine.
*/
#define MODL_INSTALL(MODL, MODLP) \
(*(MODL)->misc_modops->modm_install)(MODL, MODLP)
#define MODL_REMOVE(MODL, MODLP) \
(*(MODL)->misc_modops->modm_remove)(MODL, MODLP)
#define MODL_INFO(MODL, MODLP, P0) \
(*(MODL)->misc_modops->modm_info)(MODL, MODLP, P0)
/*
* Definitions for stubs
*/
struct mod_stub_info {
uintptr_t mods_func_adr;
struct mod_modinfo *mods_modinfo;
uintptr_t mods_stub_adr;
int (*mods_errfcn)(void);
int mods_flag; /* flags defined below */
};
/*
* Definitions for mods_flag.
*/
#define MODS_WEAK 0x01 /* weak stub (not loaded if called) */
#define MODS_NOUNLOAD 0x02 /* module not unloadable (no _fini()) */
#define MODS_INSTALLED 0x10 /* module installed */
/*
 * Per-module stub table: names the module and carries its stub entries.
 */
struct mod_modinfo {
	char *modm_module_name;	/* name of the module the stubs refer to */
	struct modctl *mp;	/* associated modctl */
	struct mod_stub_info modm_stubs[1];	/* stub array; [1] is the
						 * pre-C99 variable-length
						 * trailing-array idiom */
};
/*
 * Node in a singly-linked list of modctl pointers (used e.g. for a
 * module's requisite list; see modctl.mod_requisites).
 */
struct modctl_list {
	struct modctl_list *modl_next;	/* next node, NULL at end */
	struct modctl *modl_modp;	/* module referenced by this node */
};
/*
* Structure to manage a loadable module.
* Note: the module (mod_mp) structure's "text" and "text_size" information
* are replicated in the modctl structure so that mod_containing_pc()
* doesn't have to grab any locks (modctls are persistent; modules are not.)
*/
typedef struct modctl {
struct modctl *mod_next; /* &modules based list */
struct modctl *mod_prev;
int mod_id;
void *mod_mp;
kthread_t *mod_inprogress_thread;
struct mod_modinfo *mod_modinfo;
struct modlinkage *mod_linkage;
char *mod_filename;
char *mod_modname;
char mod_busy; /* inprogress_thread has locked */
char mod_want; /* someone waiting for unlock */
char mod_prim; /* primary module */
int mod_ref; /* ref count - from dependent or stub */
char mod_loaded; /* module in memory */
char mod_installed; /* post _init pre _fini */
char mod_loadflags;
char mod_delay_unload; /* deferred unload */
struct modctl_list *mod_requisites; /* mods this one depends on. */
void *__unused; /* NOTE: reuse (same size) is OK, */
/* deletion causes mdb.vs.core issues */
int mod_loadcnt; /* number of times mod was loaded */
int mod_nenabled; /* # of enabled DTrace probes in mod */
char *mod_text;
size_t mod_text_size;
int mod_gencount; /* # times loaded/unloaded */
struct modctl *mod_requisite_loading; /* mod circular dependency */
} modctl_t;
/*
* mod_loadflags
*/
#define MOD_NOAUTOUNLOAD 0x1 /* Auto mod-unloader skips this mod */
#define MOD_NONOTIFY 0x2 /* No krtld notifications on (un)load */
#define MOD_NOUNLOAD 0x4 /* Assume EBUSY for all _fini's */
#define MOD_BIND_HASHSIZE 64
#define MOD_BIND_HASHMASK (MOD_BIND_HASHSIZE-1)
typedef int modid_t;
/*
* global function and data declarations
*/
extern kmutex_t mod_lock;
extern char *systemfile;
extern char **syscallnames;
extern int moddebug;
/*
* this is the head of a doubly linked list. Only the next and prev
* pointers are used
*/
extern modctl_t modules;
/*
* Only the following are part of the DDI/DKI
*/
extern int mod_install(struct modlinkage *);
extern int mod_remove(struct modlinkage *);
extern int mod_info(struct modlinkage *, struct modinfo *);
/*
* bit definitions for moddebug.
*/
#define MODDEBUG_LOADMSG 0x80000000 /* print "[un]loading..." msg */
#define MODDEBUG_ERRMSG 0x40000000 /* print detailed error msgs */
#define MODDEBUG_LOADMSG2 0x20000000 /* print 2nd level msgs */
#define MODDEBUG_RETIRE 0x10000000 /* print retire msgs */
#define MODDEBUG_BINDING 0x00040000 /* driver/alias binding */
#define MODDEBUG_FINI_EBUSY 0x00020000 /* pretend fini returns EBUSY */
#define MODDEBUG_NOAUL_IPP 0x00010000 /* no Autounloading ipp mods */
#define MODDEBUG_NOAUL_DACF 0x00008000 /* no Autounloading dacf mods */
#define MODDEBUG_KEEPTEXT 0x00004000 /* keep text after unloading */
#define MODDEBUG_NOAUL_DRV 0x00001000 /* no Autounloading Drivers */
#define MODDEBUG_NOAUL_EXEC 0x00000800 /* no Autounloading Execs */
#define MODDEBUG_NOAUL_FS 0x00000400 /* no Autounloading File sys */
#define MODDEBUG_NOAUL_MISC 0x00000200 /* no Autounloading misc */
#define MODDEBUG_NOAUL_SCHED 0x00000100 /* no Autounloading scheds */
#define MODDEBUG_NOAUL_STR 0x00000080 /* no Autounloading streams */
#define MODDEBUG_NOAUL_SYS 0x00000040 /* no Autounloading syscalls */
#define MODDEBUG_NOCTF 0x00000020 /* do not load CTF debug data */
#define MODDEBUG_NOAUTOUNLOAD 0x00000010 /* no autounloading at all */
#define MODDEBUG_DDI_MOD 0x00000008 /* ddi_mod{open,sym,close} */
#define MODDEBUG_MP_MATCH 0x00000004 /* dev_minorperm */
#define MODDEBUG_MINORPERM 0x00000002 /* minor perm modctls */
#define MODDEBUG_USERDEBUG 0x00000001 /* bpt after init_module() */
#ifdef __cplusplus
}
#endif
#endif /* _SYS_MODCTL_H */

View File

@ -0,0 +1,147 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _SYS_MODHASH_H
#define _SYS_MODHASH_H
/*
* Generic hash implementation for the kernel.
*/
#ifdef __cplusplus
extern "C" {
#endif
#include <sys/zfs_context.h>
/*
* Opaque data types for storing keys and values
*/
typedef void *mod_hash_val_t;
typedef void *mod_hash_key_t;
/*
* Opaque data type for reservation
*/
typedef void *mod_hash_hndl_t;
/*
* Opaque type for hash itself.
*/
struct mod_hash;
typedef struct mod_hash mod_hash_t;
/*
* String hash table
*/
mod_hash_t *mod_hash_create_strhash_nodtr(char *, size_t,
void (*)(mod_hash_val_t));
mod_hash_t *mod_hash_create_strhash(char *, size_t, void (*)(mod_hash_val_t));
void mod_hash_destroy_strhash(mod_hash_t *);
int mod_hash_strkey_cmp(mod_hash_key_t, mod_hash_key_t);
void mod_hash_strkey_dtor(mod_hash_key_t);
void mod_hash_strval_dtor(mod_hash_val_t);
uint_t mod_hash_bystr(void *, mod_hash_key_t);
/*
* Pointer hash table
*/
mod_hash_t *mod_hash_create_ptrhash(char *, size_t, void (*)(mod_hash_val_t),
size_t);
void mod_hash_destroy_ptrhash(mod_hash_t *);
int mod_hash_ptrkey_cmp(mod_hash_key_t, mod_hash_key_t);
uint_t mod_hash_byptr(void *, mod_hash_key_t);
/*
* ID hash table
*/
mod_hash_t *mod_hash_create_idhash(char *, size_t, void (*)(mod_hash_val_t));
void mod_hash_destroy_idhash(mod_hash_t *);
int mod_hash_idkey_cmp(mod_hash_key_t, mod_hash_key_t);
uint_t mod_hash_byid(void *, mod_hash_key_t);
uint_t mod_hash_iddata_gen(size_t);
/*
* Hash management functions
*/
mod_hash_t *mod_hash_create_extended(char *, size_t, void (*)(mod_hash_key_t),
void (*)(mod_hash_val_t), uint_t (*)(void *, mod_hash_key_t), void *,
int (*)(mod_hash_key_t, mod_hash_key_t), int);
void mod_hash_destroy_hash(mod_hash_t *);
void mod_hash_clear(mod_hash_t *);
/*
* Null key and value destructors
*/
void mod_hash_null_keydtor(mod_hash_key_t);
void mod_hash_null_valdtor(mod_hash_val_t);
/*
* Basic hash operations
*/
/*
* Error codes for insert, remove, find, destroy.
*/
#define MH_ERR_NOMEM -1
#define MH_ERR_DUPLICATE -2
#define MH_ERR_NOTFOUND -3
/*
* Return codes for hash walkers
*/
#define MH_WALK_CONTINUE 0
#define MH_WALK_TERMINATE 1
/*
* Basic hash operations
*/
int mod_hash_insert(mod_hash_t *, mod_hash_key_t, mod_hash_val_t);
int mod_hash_replace(mod_hash_t *, mod_hash_key_t, mod_hash_val_t);
int mod_hash_remove(mod_hash_t *, mod_hash_key_t, mod_hash_val_t *);
int mod_hash_destroy(mod_hash_t *, mod_hash_key_t);
int mod_hash_find(mod_hash_t *, mod_hash_key_t, mod_hash_val_t *);
int mod_hash_find_cb(mod_hash_t *, mod_hash_key_t, mod_hash_val_t *,
void (*)(mod_hash_key_t, mod_hash_val_t));
int mod_hash_find_cb_rval(mod_hash_t *, mod_hash_key_t, mod_hash_val_t *,
int (*)(mod_hash_key_t, mod_hash_val_t), int *);
void mod_hash_walk(mod_hash_t *,
uint_t (*)(mod_hash_key_t, mod_hash_val_t *, void *), void *);
/*
* Reserving hash operations
*/
int mod_hash_reserve(mod_hash_t *, mod_hash_hndl_t *);
int mod_hash_reserve_nosleep(mod_hash_t *, mod_hash_hndl_t *);
void mod_hash_cancel(mod_hash_t *, mod_hash_hndl_t *);
int mod_hash_insert_reserve(mod_hash_t *, mod_hash_key_t, mod_hash_val_t,
mod_hash_hndl_t);
#ifdef __cplusplus
}
#endif
#endif /* _SYS_MODHASH_H */

View File

@ -0,0 +1,108 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _SYS_MODHASH_IMPL_H
#define _SYS_MODHASH_IMPL_H
/*
* Internal details for the kernel's generic hash implementation.
*/
#ifdef __cplusplus
extern "C" {
#endif
#include <sys/zfs_context.h>
#include <sys/modhash.h>
struct mod_hash_entry {
mod_hash_key_t mhe_key; /* stored hash key */
mod_hash_val_t mhe_val; /* stored hash value */
struct mod_hash_entry *mhe_next; /* next item in chain */
};
struct mod_hash_stat {
ulong_t mhs_hit; /* tried a 'find' and it succeeded */
ulong_t mhs_miss; /* tried a 'find' but it failed */
ulong_t mhs_coll; /* occur when insert fails because of dup's */
ulong_t mhs_nelems; /* total number of stored key/value pairs */
ulong_t mhs_nomem; /* number of times kmem_alloc failed */
};
struct mod_hash {
krwlock_t mh_contents; /* lock protecting contents */
char *mh_name; /* hash name */
int mh_sleep; /* kmem_alloc flag */
size_t mh_nchains; /* # of elements in mh_entries */
/* key and val destructor */
void (*mh_kdtor)(mod_hash_key_t);
void (*mh_vdtor)(mod_hash_val_t);
/* key comparator */
int (*mh_keycmp)(mod_hash_key_t, mod_hash_key_t);
/* hash algorithm, and algorithm-private data */
uint_t (*mh_hashalg)(void *, mod_hash_key_t);
void *mh_hashalg_data;
struct mod_hash *mh_next; /* next hash in list */
struct mod_hash_stat mh_stat;
struct mod_hash_entry *mh_entries[1];
};
/*
* MH_SIZE()
* Compute the size of a mod_hash_t, in bytes, given the number of
* elements it contains.
*/
#define MH_SIZE(n) \
(sizeof (mod_hash_t) + ((n) - 1) * (sizeof (struct mod_hash_entry *)))
/*
* Module initialization; called once.
*/
void mod_hash_fini(void);
void mod_hash_init(void);
/*
* Internal routines. Use directly with care.
*/
uint_t i_mod_hash(mod_hash_t *, mod_hash_key_t);
int i_mod_hash_insert_nosync(mod_hash_t *, mod_hash_key_t, mod_hash_val_t,
mod_hash_hndl_t);
int i_mod_hash_remove_nosync(mod_hash_t *, mod_hash_key_t, mod_hash_val_t *);
int i_mod_hash_find_nosync(mod_hash_t *, mod_hash_key_t, mod_hash_val_t *);
void i_mod_hash_walk_nosync(mod_hash_t *, uint_t (*)(mod_hash_key_t,
mod_hash_val_t *, void *), void *);
void i_mod_hash_clear_nosync(mod_hash_t *hash);
#ifdef __cplusplus
}
#endif
#endif /* _SYS_MODHASH_IMPL_H */

View File

@ -0,0 +1,36 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _SYS_STACK_H
#define _SYS_STACK_H
#if defined(__i386) || defined(__amd64)
#include <sys/ia32/stack.h> /* XX64 x86/sys/stack.h */
#endif
#endif /* _SYS_STACK_H */

View File

@ -0,0 +1,36 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _SYS_TRAP_H
#define _SYS_TRAP_H
#if defined(__i386) || defined(__amd64)
#include <sys/ia32/trap.h> /* XX64 x86/sys/trap.h */
#endif
#endif /* _SYS_TRAP_H */

1437
module/icp/io/aes.c Normal file

File diff suppressed because it is too large Load Diff

1239
module/icp/io/sha1_mod.c Normal file

File diff suppressed because it is too large Load Diff

1307
module/icp/io/sha2_mod.c Normal file

File diff suppressed because it is too large Load Diff

171
module/icp/os/modconf.c Normal file
View File

@ -0,0 +1,171 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <sys/zfs_context.h>
#include <sys/modctl.h>
/*
* Null operations; used for uninitialized and "misc" modules.
*/
static int mod_null(struct modlmisc *, struct modlinkage *);
static int mod_infonull(void *, struct modlinkage *, int *);
/*
* Cryptographic Modules
*/
struct mod_ops mod_cryptoops = {
mod_null, mod_null, mod_infonull
};
/*
* Null operation; return 0.
*/
static int
mod_null(struct modlmisc *modl, struct modlinkage *modlp)
{
	(void) modl;
	(void) modlp;

	/* nothing to install or remove for these module types */
	return (0);
}
/*
* Status for User modules.
*/
static int
mod_infonull(void *modl, struct modlinkage *modlp, int *p0)
{
	(void) modl;
	(void) modlp;

	/* -1 tells the modinfo display there is no module-specific status */
	*p0 = -1;
	return (0);
}
/*
* Install a module.
* (This routine is in the Solaris SPARC DDI/DKI)
*/
int
mod_install(struct modlinkage *modlp)
{
	struct modlmisc **lpp;
	int status = -1;	/* returned as-is if there are no linkages */

	if (modlp->ml_rev != MODREV_1) {
		cmn_err(CE_WARN, "mod_install: "
		    "modlinkage structure is not MODREV_1\n");
		return (EINVAL);
	}

	/*
	 * Run the type-specific install hook for each linkage structure.
	 * On the first failure, back out the ones already installed.
	 */
	for (lpp = (struct modlmisc **)&modlp->ml_linkage[0];
	    *lpp != NULL; lpp++) {
		status = MODL_INSTALL(*lpp, modlp);
		if (status != 0) {
			struct modlmisc **undo;

			for (undo = (struct modlmisc **)&modlp->ml_linkage[0];
			    undo != lpp; undo++)
				MODL_REMOVE(*undo, modlp);	/* clean up */
			break;
		}
	}
	return (status);
}
static char *reins_err =
"Could not reinstall %s\nReboot to correct the problem";
/*
* Remove a module. This is called by the module wrapper routine.
* (This routine is in the Solaris SPARC DDI/DKI)
*/
int
mod_remove(struct modlinkage *modlp)
{
	struct modlmisc **lpp, *failed;
	int status = 0;

	/*
	 * Run the type-specific remove hook for each linkage structure.
	 * If one refuses, reinstall everything removed before it so the
	 * module is left fully operational.
	 */
	lpp = (struct modlmisc **)&modlp->ml_linkage[0];
	while (*lpp != NULL) {
		status = MODL_REMOVE(*lpp, modlp);
		if (status != 0) {
			failed = *lpp;
			for (lpp = (struct modlmisc **)&modlp->ml_linkage[0];
			    *lpp != failed; lpp++) {
				if (MODL_INSTALL(*lpp, modlp) != 0) {
					cmn_err(CE_WARN, reins_err,
					    (*lpp)->misc_linkinfo);
					break;
				}
			}
			break;
		}
		lpp++;
	}
	return (status);
}
/*
* Get module status.
* (This routine is in the Solaris SPARC DDI/DKI)
*/
int
mod_info(struct modlinkage *modlp, struct modinfo *modinfop)
{
	int i;
	int retval = 0;
	struct modspecific_info *msip;
	struct modlmisc **linkpp;

	/* report the modlinkage revision to the caller */
	modinfop->mi_rev = modlp->ml_rev;

	linkpp = (struct modlmisc **)modlp->ml_linkage;
	msip = &modinfop->mi_msinfo[0];

	/*
	 * Fill all MODMAXLINK slots of mi_msinfo.  Once the NULL
	 * terminator of ml_linkage is reached, linkpp is deliberately not
	 * advanced any further, so every remaining slot gets an empty
	 * link-info string.
	 */
	for (i = 0; i < MODMAXLINK; i++) {
		if (*linkpp == NULL) {
			msip->msi_linkinfo[0] = '\0';
		} else {
			(void) strncpy(msip->msi_linkinfo,
			    (*linkpp)->misc_linkinfo, MODMAXLINKINFOLEN);
			retval = MODL_INFO(*linkpp, modlp, &msip->msi_p0);
			if (retval != 0)
				break;
			linkpp++;
		}
		msip++;
	}
	if (modinfop->mi_info == MI_INFO_LINKAGE) {
		/*
		 * Slight kludge used to extract the address of the
		 * modlinkage structure from the module (just after
		 * loading a module for the very first time)
		 */
		modinfop->mi_base = (void *)modlp;
	}
	/* note inverted convention: 1 means success, 0 means failure */
	if (retval == 0)
		return (1);
	return (0);
}

925
module/icp/os/modhash.c Normal file
View File

@ -0,0 +1,925 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* mod_hash: flexible hash table implementation.
*
* This is a reasonably fast, reasonably flexible hash table implementation
* which features pluggable hash algorithms to support storing arbitrary keys
* and values. It is designed to handle small (< 100,000 items) amounts of
* data. The hash uses chaining to resolve collisions, and does not feature a
* mechanism to grow the hash. Care must be taken to pick nchains to be large
* enough for the application at hand, or lots of time will be wasted searching
* hash chains.
*
* The client of the hash is required to supply a number of items to support
* the various hash functions:
*
* - Destructor functions for the key and value being hashed.
* A destructor is responsible for freeing an object when the hash
* table is no longer storing it. Since keys and values can be of
* arbitrary type, separate destructors for keys & values are used.
* These may be mod_hash_null_keydtor and mod_hash_null_valdtor if no
* destructor is needed for either a key or value.
*
* - A hashing algorithm which returns a uint_t representing a hash index
* The number returned need _not_ be between 0 and nchains. The mod_hash
* code will take care of doing that. The second argument (after the
* key) to the hashing function is a void * that represents
 * hash_alg_data-- this is provided so that the hashing algorithm can
* maintain some state across calls, or keep algorithm-specific
* constants associated with the hash table.
*
* A pointer-hashing and a string-hashing algorithm are supplied in
* this file.
*
* - A key comparator (a la qsort).
* This is used when searching the hash chain. The key comparator
* determines if two keys match. It should follow the return value
* semantics of strcmp.
*
* string and pointer comparators are supplied in this file.
*
* mod_hash_create_strhash() and mod_hash_create_ptrhash() provide good
* examples of how to create a customized hash table.
*
* Basic hash operations:
*
* mod_hash_create_strhash(name, nchains, dtor),
* create a hash using strings as keys.
 * NOTE: This creates a hash which automatically cleans up the string
* values it is given for keys.
*
* mod_hash_create_ptrhash(name, nchains, dtor, key_elem_size):
* create a hash using pointers as keys.
*
* mod_hash_create_extended(name, nchains, kdtor, vdtor,
* hash_alg, hash_alg_data,
* keycmp, sleep)
* create a customized hash table.
*
* mod_hash_destroy_hash(hash):
* destroy the given hash table, calling the key and value destructors
* on each key-value pair stored in the hash.
*
* mod_hash_insert(hash, key, val):
* place a key, value pair into the given hash.
* duplicate keys are rejected.
*
* mod_hash_insert_reserve(hash, key, val, handle):
* place a key, value pair into the given hash, using handle to indicate
* the reserved storage for the pair. (no memory allocation is needed
* during a mod_hash_insert_reserve.) duplicate keys are rejected.
*
* mod_hash_reserve(hash, *handle):
* reserve storage for a key-value pair using the memory allocation
* policy of 'hash', returning the storage handle in 'handle'.
*
* mod_hash_reserve_nosleep(hash, *handle): reserve storage for a key-value
* pair ignoring the memory allocation policy of 'hash' and always without
* sleep, returning the storage handle in 'handle'.
*
* mod_hash_remove(hash, key, *val):
* remove a key-value pair with key 'key' from 'hash', destroying the
* stored key, and returning the value in val.
*
* mod_hash_replace(hash, key, val)
* atomically remove an existing key-value pair from a hash, and replace
* the key and value with the ones supplied. The removed key and value
* (if any) are destroyed.
*
* mod_hash_destroy(hash, key):
* remove a key-value pair with key 'key' from 'hash', destroying both
* stored key and stored value.
*
* mod_hash_find(hash, key, val):
* find a value in the hash table corresponding to the given key.
*
* mod_hash_find_cb(hash, key, val, found_callback)
* find a value in the hash table corresponding to the given key.
* If a value is found, call specified callback passing key and val to it.
* The callback is called with the hash lock held.
* It is intended to be used in situations where the act of locating the
* data must also modify it - such as in reference counting schemes.
*
* mod_hash_walk(hash, callback(key, elem, arg), arg)
* walks all the elements in the hashtable and invokes the callback
* function with the key/value pair for each element. the hashtable
* is locked for readers so the callback function should not attempt
 * to do any updates to the hashtable. the callback function should
* return MH_WALK_CONTINUE to continue walking the hashtable or
* MH_WALK_TERMINATE to abort the walk of the hashtable.
*
* mod_hash_clear(hash):
* clears the given hash table of entries, calling the key and value
* destructors for every element in the hash.
*/
#include <sys/zfs_context.h>
#include <sys/bitmap.h>
#include <sys/modhash_impl.h>
#include <sys/sysmacros.h>
/*
* MH_KEY_DESTROY()
* Invoke the key destructor.
*/
#define MH_KEY_DESTROY(hash, key) ((hash->mh_kdtor)(key))
/*
* MH_VAL_DESTROY()
* Invoke the value destructor.
*/
#define MH_VAL_DESTROY(hash, val) ((hash->mh_vdtor)(val))
/*
* MH_KEYCMP()
* Call the key comparator for the given hash keys.
*/
#define MH_KEYCMP(hash, key1, key2) ((hash->mh_keycmp)(key1, key2))
/*
* Cache for struct mod_hash_entry
*/
kmem_cache_t *mh_e_cache = NULL;
mod_hash_t *mh_head = NULL;
kmutex_t mh_head_lock;
/*
* mod_hash_null_keydtor()
* mod_hash_null_valdtor()
* no-op key and value destructors.
*/
/*ARGSUSED*/
void
mod_hash_null_keydtor(mod_hash_key_t key)
{
	/* intentionally empty: keys owned by the caller need no cleanup */
}
/*ARGSUSED*/
void
mod_hash_null_valdtor(mod_hash_val_t val)
{
	/* intentionally empty: values owned by the caller need no cleanup */
}
/*
* mod_hash_bystr()
* mod_hash_strkey_cmp()
* mod_hash_strkey_dtor()
* mod_hash_strval_dtor()
* Hash and key comparison routines for hashes with string keys.
*
* mod_hash_create_strhash()
* Create a hash using strings as keys
*
* The string hashing algorithm is from the "Dragon Book" --
* "Compilers: Principles, Tools & Techniques", by Aho, Sethi, Ullman
*/
/*ARGSUSED*/
uint_t
mod_hash_bystr(void *hash_data, mod_hash_key_t key)
{
	char *cp = (char *)key;
	uint_t h = 0;

	ASSERT(cp);

	/* "Dragon Book" string hash: shift-add, fold back the top nibble */
	while (*cp != '\0') {
		uint_t top;

		h = (h << 4) + *cp++;
		top = h & 0xf0000000;
		if (top != 0) {
			h ^= (top >> 24);
			h ^= top;
		}
	}
	return (h);
}
int
mod_hash_strkey_cmp(mod_hash_key_t key1, mod_hash_key_t key2)
{
	const char *s1 = key1;
	const char *s2 = key2;

	/* strcmp semantics: <0, 0, >0 */
	return (strcmp(s1, s2));
}
void
mod_hash_strkey_dtor(mod_hash_key_t key)
{
	char *str = key;

	/* string keys were allocated strlen + 1 bytes; free exactly that */
	kmem_free(str, strlen(str) + 1);
}
void
mod_hash_strval_dtor(mod_hash_val_t val)
{
	char *str = val;

	/* string values were allocated strlen + 1 bytes; free exactly that */
	kmem_free(str, strlen(str) + 1);
}
mod_hash_t *
mod_hash_create_strhash_nodtr(char *name, size_t nchains,
    void (*val_dtor)(mod_hash_val_t))
{
	/* like mod_hash_create_strhash(), but string keys are NOT freed */
	return (mod_hash_create_extended(name, nchains, mod_hash_null_keydtor,
	    val_dtor, mod_hash_bystr, NULL, mod_hash_strkey_cmp, KM_SLEEP));
}
mod_hash_t *
mod_hash_create_strhash(char *name, size_t nchains,
    void (*val_dtor)(mod_hash_val_t))
{
	/* string-keyed hash that frees its keys via mod_hash_strkey_dtor */
	return (mod_hash_create_extended(name, nchains, mod_hash_strkey_dtor,
	    val_dtor, mod_hash_bystr, NULL, mod_hash_strkey_cmp, KM_SLEEP));
}
void
mod_hash_destroy_strhash(mod_hash_t *hash)
{
	/* thin wrapper: destroy a hash built by mod_hash_create_strhash() */
	ASSERT(hash);
	mod_hash_destroy_hash(hash);
}
/*
* mod_hash_byptr()
* mod_hash_ptrkey_cmp()
* Hash and key comparison routines for hashes with pointer keys.
*
* mod_hash_create_ptrhash()
* mod_hash_destroy_ptrhash()
* Create a hash that uses pointers as keys. This hash algorithm
* picks an appropriate set of middle bits in the address to hash on
* based on the size of the hash table and a hint about the size of
* the items pointed at.
*/
uint_t
mod_hash_byptr(void *hash_data, mod_hash_key_t key)
{
	/* hash_data carries the right-shift count chosen at create time */
	uintptr_t shifted = (uintptr_t)key >> (int)(uintptr_t)hash_data;

	return ((uint_t)shifted);
}
int
mod_hash_ptrkey_cmp(mod_hash_key_t key1, mod_hash_key_t key2)
{
	uintptr_t a = (uintptr_t)key1;
	uintptr_t b = (uintptr_t)key2;

	if (a == b)
		return (0);
	/* NOTE: ordering is inverted relative to strcmp (a > b gives -1) */
	return ((a > b) ? -1 : 1);
}
mod_hash_t *
mod_hash_create_ptrhash(char *name, size_t nchains,
    void (*val_dtor)(mod_hash_val_t), size_t key_elem_size)
{
	size_t shift;

	/*
	 * Hash on the middle bits of the address word.  The low bits
	 * carry little information (an array of 256-byte structures has
	 * identical low 8 bits in every element address), so each key is
	 * right-shifted to discard them.  The unused high bits fall out
	 * later when mod_hash reduces the hash value modulo nchains.
	 */
	shift = highbit(key_elem_size);

	return (mod_hash_create_extended(name, nchains, mod_hash_null_keydtor,
	    val_dtor, mod_hash_byptr, (void *)shift, mod_hash_ptrkey_cmp,
	    KM_SLEEP));
}
void
mod_hash_destroy_ptrhash(mod_hash_t *hash)
{
	/* thin wrapper: destroy a hash built by mod_hash_create_ptrhash() */
	ASSERT(hash);
	mod_hash_destroy_hash(hash);
}
/*
* mod_hash_byid()
* mod_hash_idkey_cmp()
* Hash and key comparison routines for hashes with 32-bit unsigned keys.
*
* mod_hash_create_idhash()
* mod_hash_destroy_idhash()
* mod_hash_iddata_gen()
* Create a hash that uses numeric keys.
*
* The hash algorithm is documented in "Introduction to Algorithms"
* (Cormen, Leiserson, Rivest); when the hash table is created, it
* attempts to find the next largest prime above the number of hash
* slots. The hash index is then this number times the key modulo
* the hash size, or (key * prime) % nchains.
*/
uint_t
mod_hash_byid(void *hash_data, mod_hash_key_t key)
{
	/* hash_data carries the prime multiplier from mod_hash_iddata_gen() */
	uint_t mult = (uint_t)(uintptr_t)hash_data;

	return ((uint_t)(uintptr_t)key * mult);
}
int
mod_hash_idkey_cmp(mod_hash_key_t key1, mod_hash_key_t key2)
{
	uint_t id1 = (uint_t)(uintptr_t)key1;
	uint_t id2 = (uint_t)(uintptr_t)key2;

	/* unsigned difference, truncated to int; zero iff the ids match */
	return (id1 - id2);
}
/*
* Generate the next largest prime number greater than nchains; this value
* is intended to be later passed in to mod_hash_create_extended() as the
* hash_data.
*/
uint_t
mod_hash_iddata_gen(size_t nchains)
{
	uint_t kval, i, prime;

	/*
	 * Pick the first (odd) prime greater than nchains.  Make sure kval
	 * is odd (so start with nchains + 1 or + 2 as appropriate).
	 * NOTE(review): assumes nchains >= 2; smaller values can yield a
	 * non-prime result (e.g. nchains == 0 returns 1) -- confirm callers.
	 */
	kval = (nchains % 2 == 0) ? nchains + 1 : nchains + 2;

	for (;;) {
		prime = 1;
		/* trial division by odd factors up to sqrt(kval) */
		for (i = 3; i * i <= kval; i += 2) {
			if (kval % i == 0) {
				prime = 0;
				break;	/* composite: no need to keep dividing */
			}
		}
		if (prime == 1)
			break;
		kval += 2;	/* stay odd; even candidates can't be prime */
	}
	return (kval);
}
mod_hash_t *
mod_hash_create_idhash(char *name, size_t nchains,
    void (*val_dtor)(mod_hash_val_t))
{
	/* the generated prime becomes the multiplier for mod_hash_byid() */
	uint_t prime = mod_hash_iddata_gen(nchains);

	return (mod_hash_create_extended(name, nchains, mod_hash_null_keydtor,
	    val_dtor, mod_hash_byid, (void *)(uintptr_t)prime,
	    mod_hash_idkey_cmp, KM_SLEEP));
}
void
mod_hash_destroy_idhash(mod_hash_t *hash)
{
	/* Tear down a hash created by mod_hash_create_idhash(). */
	ASSERT(hash != NULL);
	mod_hash_destroy_hash(hash);
}
void
mod_hash_fini(void)
{
	/* Undo mod_hash_init(): release the list lock and the entry cache. */
	mutex_destroy(&mh_head_lock);

	if (mh_e_cache != NULL) {
		kmem_cache_destroy(mh_e_cache);
		mh_e_cache = NULL;
	}
}
/*
 * mod_hash_init()
 * 	sets up globals, etc for mod_hash_*
 */
void
mod_hash_init(void)
{
	ASSERT(mh_e_cache == NULL);

	mutex_init(&mh_head_lock, NULL, MUTEX_DEFAULT, NULL);

	/* One cache serves the entries of every mod_hash in the system. */
	mh_e_cache = kmem_cache_create("mod_hash_entries",
	    sizeof (struct mod_hash_entry), 0, NULL, NULL, NULL, NULL,
	    NULL, 0);
}
/*
 * mod_hash_create_extended()
 * 	The full-blown hash creation function.
 *
 * notes:
 * 	nchains - how many hash slots to create.  More hash slots will
 *		result in shorter hash chains, but will consume
 * 		slightly more memory up front.
 *	sleep - should be KM_SLEEP or KM_NOSLEEP, to indicate whether
 *		to sleep for memory, or fail in low-memory conditions.
 *
 * 	Fails only if KM_NOSLEEP was specified, and no memory was available.
 */
mod_hash_t *
mod_hash_create_extended(
    char *hname,			/* descriptive name for hash */
    size_t nchains,			/* number of hash slots */
    void (*kdtor)(mod_hash_key_t),	/* key destructor */
    void (*vdtor)(mod_hash_val_t),	/* value destructor */
    uint_t (*hash_alg)(void *, mod_hash_key_t), /* hash algorithm */
    void *hash_alg_data,		/* pass-thru arg for hash_alg */
    int (*keycmp)(mod_hash_key_t, mod_hash_key_t), /* key comparator */
    int sleep)				/* whether to sleep for mem */
{
	mod_hash_t *mh;

	ASSERT(hname && keycmp && hash_alg && vdtor && kdtor);

	mh = kmem_zalloc(MH_SIZE(nchains), sleep);
	if (mh == NULL)
		return (NULL);

	mh->mh_name = kmem_alloc(strlen(hname) + 1, sleep);
	if (mh->mh_name == NULL) {
		kmem_free(mh, MH_SIZE(nchains));
		return (NULL);
	}
	(void) strcpy(mh->mh_name, hname);

	rw_init(&mh->mh_contents, NULL, RW_DEFAULT, NULL);
	mh->mh_sleep = sleep;
	mh->mh_nchains = nchains;
	mh->mh_kdtor = kdtor;
	mh->mh_vdtor = vdtor;
	mh->mh_hashalg = hash_alg;
	mh->mh_hashalg_data = hash_alg_data;
	mh->mh_keycmp = keycmp;

	/* Publish the new hash on the global list of hashes. */
	mutex_enter(&mh_head_lock);
	mh->mh_next = mh_head;
	mh_head = mh;
	mutex_exit(&mh_head_lock);

	return (mh);
}
/*
 * mod_hash_destroy_hash()
 * 	destroy a hash table, destroying all of its stored keys and values
 * 	as well.
 */
void
mod_hash_destroy_hash(mod_hash_t *hash)
{
	mod_hash_t *cur, *prev;

	/* Unlink the hash from the global list of hashes. */
	mutex_enter(&mh_head_lock);
	if (hash == mh_head) {
		/* Removing the first list element. */
		mh_head = mh_head->mh_next;
	} else {
		/*
		 * prev can start out NULL since we know the first element
		 * is not the one we are looking for.
		 */
		prev = NULL;
		for (cur = mh_head; cur != NULL; cur = cur->mh_next) {
			if (cur == hash) {
				prev->mh_next = cur->mh_next;
				break;
			}
			prev = cur;
		}
	}
	mutex_exit(&mh_head_lock);

	/* Destroy all stored keys and values, then the table itself. */
	mod_hash_clear(hash);
	rw_destroy(&hash->mh_contents);
	kmem_free(hash->mh_name, strlen(hash->mh_name) + 1);
	kmem_free(hash, MH_SIZE(hash->mh_nchains));
}
/*
 * i_mod_hash()
 * 	Call the hashing algorithm for this hash table, with the given key.
 */
uint_t
i_mod_hash(mod_hash_t *hash, mod_hash_key_t key)
{
	/*
	 * Prevent div by 0 problems; also a nice shortcut when using a
	 * hash as a list.
	 */
	if (hash->mh_nchains == 1)
		return (0);

	/*
	 * NOTE(review): the modulus is nchains - 1, so the final chain is
	 * never selected here; i_mod_hash_walk_nosync() likewise only scans
	 * the first nchains - 1 buckets.  This matches the original
	 * implementation -- confirm before "fixing" to % nchains.
	 */
	return ((hash->mh_hashalg)(hash->mh_hashalg_data, key) %
	    (hash->mh_nchains - 1));
}
/*
 * i_mod_hash_insert_nosync()
 * mod_hash_insert()
 * mod_hash_insert_reserve()
 * 	insert 'val' into the hash table, using 'key' as its key.  If 'key' is
 * 	already a key in the hash, an error will be returned, and the key-val
 * 	pair will not be inserted.  i_mod_hash_insert_nosync() supports a
 * 	simple handle abstraction, allowing hash entry allocation to be
 * 	separated from the hash insertion.  this abstraction allows simple
 * 	use of the mod_hash structure in situations where mod_hash_insert()
 * 	with a KM_SLEEP allocation policy would otherwise be unsafe.
 */
int
i_mod_hash_insert_nosync(mod_hash_t *hash, mod_hash_key_t key,
    mod_hash_val_t val, mod_hash_hndl_t handle)
{
	struct mod_hash_entry *entry;
	uint_t bucket;

	ASSERT(hash);

	if (handle != (mod_hash_hndl_t)0) {
		/* Caller pre-reserved storage via mod_hash_reserve(). */
		entry = (struct mod_hash_entry *)handle;
	} else {
		/* Allocate directly, using the hash's allocation policy. */
		entry = kmem_cache_alloc(mh_e_cache, hash->mh_sleep);
		if (entry == NULL) {
			hash->mh_stat.mhs_nomem++;
			return (MH_ERR_NOMEM);
		}
	}

	/* Prepend the new entry onto its bucket's chain. */
	bucket = i_mod_hash(hash, key);
	entry->mhe_key = key;
	entry->mhe_val = val;
	entry->mhe_next = hash->mh_entries[bucket];
	hash->mh_entries[bucket] = entry;

	hash->mh_stat.mhs_nelems++;
	return (0);
}
/*
 * Insert key/val, failing with MH_ERR_DUPLICATE if the key already exists.
 */
int
mod_hash_insert(mod_hash_t *hash, mod_hash_key_t key, mod_hash_val_t val)
{
	int res;
	mod_hash_val_t v;

	rw_enter(&hash->mh_contents, RW_WRITER);

	/*
	 * Disallow duplicate keys in the hash.  Bump the collision counter
	 * while the table is still write-locked; the previous code updated
	 * it after rw_exit(), racing with concurrent updaters.
	 */
	if (i_mod_hash_find_nosync(hash, key, &v) == 0) {
		hash->mh_stat.mhs_coll++;
		rw_exit(&hash->mh_contents);
		return (MH_ERR_DUPLICATE);
	}

	res = i_mod_hash_insert_nosync(hash, key, val, (mod_hash_hndl_t)0);
	rw_exit(&hash->mh_contents);

	return (res);
}
/*
 * As mod_hash_insert(), but consumes a pre-reserved entry ('handle') from
 * mod_hash_reserve(), so no allocation occurs under the write lock.
 */
int
mod_hash_insert_reserve(mod_hash_t *hash, mod_hash_key_t key,
    mod_hash_val_t val, mod_hash_hndl_t handle)
{
	int res;
	mod_hash_val_t v;

	rw_enter(&hash->mh_contents, RW_WRITER);

	/*
	 * Disallow duplicate keys in the hash.  The collision counter is
	 * bumped before dropping the lock (the previous code incremented it
	 * after rw_exit(), an unsynchronized update).
	 */
	if (i_mod_hash_find_nosync(hash, key, &v) == 0) {
		hash->mh_stat.mhs_coll++;
		rw_exit(&hash->mh_contents);
		return (MH_ERR_DUPLICATE);
	}

	res = i_mod_hash_insert_nosync(hash, key, val, handle);
	rw_exit(&hash->mh_contents);

	return (res);
}
/*
 * mod_hash_reserve()
 * mod_hash_reserve_nosleep()
 * mod_hash_cancel()
 *   Make or cancel a mod_hash_entry_t reservation.  Reservations are used in
 *   mod_hash_insert_reserve() above.
 */
int
mod_hash_reserve(mod_hash_t *hash, mod_hash_hndl_t *handlep)
{
	/* Reserve an entry using the hash's own allocation policy. */
	*handlep = kmem_cache_alloc(mh_e_cache, hash->mh_sleep);
	if (*handlep != NULL)
		return (0);

	hash->mh_stat.mhs_nomem++;
	return (MH_ERR_NOMEM);
}
int
mod_hash_reserve_nosleep(mod_hash_t *hash, mod_hash_hndl_t *handlep)
{
	/* As mod_hash_reserve(), but never blocks regardless of mh_sleep. */
	*handlep = kmem_cache_alloc(mh_e_cache, KM_NOSLEEP);
	if (*handlep != NULL)
		return (0);

	hash->mh_stat.mhs_nomem++;
	return (MH_ERR_NOMEM);
}
/*ARGSUSED*/
void
mod_hash_cancel(mod_hash_t *hash, mod_hash_hndl_t *handlep)
{
	/* Return the unused reservation to the cache and clear the handle. */
	mod_hash_hndl_t hndl = *handlep;

	*handlep = (mod_hash_hndl_t)0;
	kmem_cache_free(mh_e_cache, hndl);
}
/*
 * i_mod_hash_remove_nosync()
 * mod_hash_remove()
 * 	Remove an element from the hash table.
 */
int
i_mod_hash_remove_nosync(mod_hash_t *hash, mod_hash_key_t key,
    mod_hash_val_t *val)
{
	struct mod_hash_entry *e, *prev;
	int bucket;

	bucket = i_mod_hash(hash, key);

	/* Find the matching entry, remembering its predecessor. */
	prev = NULL;
	for (e = hash->mh_entries[bucket]; e != NULL; e = e->mhe_next) {
		if (MH_KEYCMP(hash, e->mhe_key, key) == 0)
			break;
		prev = e;
	}

	if (e == NULL)
		return (MH_ERR_NOTFOUND);

	/* Unlink; a NULL predecessor means e heads the chain. */
	if (prev == NULL)
		hash->mh_entries[bucket] = e->mhe_next;
	else
		prev->mhe_next = e->mhe_next;

	/*
	 * Clean up resources used by the node's key; the value is handed
	 * back to the caller, who owns it from here on.
	 */
	MH_KEY_DESTROY(hash, e->mhe_key);
	*val = e->mhe_val;
	kmem_cache_free(mh_e_cache, e);
	hash->mh_stat.mhs_nelems--;

	return (0);
}
int
mod_hash_remove(mod_hash_t *hash, mod_hash_key_t key, mod_hash_val_t *val)
{
	int rv;

	/* Write-locked wrapper around i_mod_hash_remove_nosync(). */
	rw_enter(&hash->mh_contents, RW_WRITER);
	rv = i_mod_hash_remove_nosync(hash, key, val);
	rw_exit(&hash->mh_contents);

	return (rv);
}
/*
 * mod_hash_replace()
 * 	atomically remove an existing key-value pair from a hash, and replace
 * 	the key and value with the ones supplied.  The removed key and value
 * 	(if any) are destroyed.
 */
int
mod_hash_replace(mod_hash_t *hash, mod_hash_key_t key, mod_hash_val_t val)
{
	int rv;
	mod_hash_val_t old_val;

	rw_enter(&hash->mh_contents, RW_WRITER);

	if (i_mod_hash_remove_nosync(hash, key, &old_val) == 0) {
		/*
		 * i_mod_hash_remove_nosync() already destroyed the old key;
		 * dispose of the displaced value here.
		 */
		MH_VAL_DESTROY(hash, old_val);
	}
	rv = i_mod_hash_insert_nosync(hash, key, val, (mod_hash_hndl_t)0);

	rw_exit(&hash->mh_contents);

	return (rv);
}
/*
 * mod_hash_destroy()
 * 	Remove an element from the hash table matching 'key', and destroy it.
 */
int
mod_hash_destroy(mod_hash_t *hash, mod_hash_key_t key)
{
	mod_hash_val_t old_val;
	int rv;

	rw_enter(&hash->mh_contents, RW_WRITER);

	rv = i_mod_hash_remove_nosync(hash, key, &old_val);
	if (rv == 0) {
		/* The key was destroyed by the remove; now free the value. */
		MH_VAL_DESTROY(hash, old_val);
	}

	rw_exit(&hash->mh_contents);

	return (rv);
}
/*
 * i_mod_hash_find_nosync()
 * mod_hash_find()
 * 	Find a value in the hash table corresponding to the given key.
 */
int
i_mod_hash_find_nosync(mod_hash_t *hash, mod_hash_key_t key,
    mod_hash_val_t *val)
{
	struct mod_hash_entry *e;

	/* Walk the chain for this key's bucket looking for a match. */
	for (e = hash->mh_entries[i_mod_hash(hash, key)]; e != NULL;
	    e = e->mhe_next) {
		if (MH_KEYCMP(hash, e->mhe_key, key) == 0) {
			*val = e->mhe_val;
			hash->mh_stat.mhs_hit++;
			return (0);
		}
	}

	hash->mh_stat.mhs_miss++;
	return (MH_ERR_NOTFOUND);
}
int
mod_hash_find(mod_hash_t *hash, mod_hash_key_t key, mod_hash_val_t *val)
{
	int rv;

	/* Read-locked wrapper around i_mod_hash_find_nosync(). */
	rw_enter(&hash->mh_contents, RW_READER);
	rv = i_mod_hash_find_nosync(hash, key, val);
	rw_exit(&hash->mh_contents);

	return (rv);
}
int
mod_hash_find_cb(mod_hash_t *hash, mod_hash_key_t key, mod_hash_val_t *val,
    void (*find_cb)(mod_hash_key_t, mod_hash_val_t))
{
	int rv;

	rw_enter(&hash->mh_contents, RW_READER);
	rv = i_mod_hash_find_nosync(hash, key, val);
	/* On a hit, invoke the callback while still under the read lock. */
	if (rv == 0)
		find_cb(key, *val);
	rw_exit(&hash->mh_contents);

	return (rv);
}
int
mod_hash_find_cb_rval(mod_hash_t *hash, mod_hash_key_t key, mod_hash_val_t *val,
    int (*find_cb)(mod_hash_key_t, mod_hash_val_t), int *cb_rval)
{
	int rv;

	rw_enter(&hash->mh_contents, RW_READER);
	rv = i_mod_hash_find_nosync(hash, key, val);
	/* On a hit, run the callback and hand its result back via cb_rval. */
	if (rv == 0)
		*cb_rval = find_cb(key, *val);
	rw_exit(&hash->mh_contents);

	return (rv);
}
void
i_mod_hash_walk_nosync(mod_hash_t *hash,
    uint_t (*callback)(mod_hash_key_t, mod_hash_val_t *, void *), void *arg)
{
	struct mod_hash_entry *e;
	uint_t bucket;
	uint_t rv;

	/*
	 * Visit every entry, stopping as soon as the callback returns
	 * anything other than MH_WALK_CONTINUE.  Only the first
	 * nchains - 1 buckets are scanned, matching the modulus used
	 * by i_mod_hash().
	 */
	for (bucket = 0; bucket < hash->mh_nchains - 1; bucket++) {
		for (e = hash->mh_entries[bucket]; e != NULL;
		    e = e->mhe_next) {
			rv = callback(e->mhe_key, e->mhe_val, arg);
			if (rv != MH_WALK_CONTINUE)
				return;
		}
	}
}
/*
 * mod_hash_walk()
 * 	Walks all the elements in the hashtable and invokes the callback
 * 	function with the key/value pair for each element. The hashtable
 * 	is locked for readers so the callback function should not attempt
 * 	to do any updates to the hashable. The callback function should
 * 	return MH_WALK_CONTINUE to continue walking the hashtable or
 * 	MH_WALK_TERMINATE to abort the walk of the hashtable.
 */
void
mod_hash_walk(mod_hash_t *hash,
    uint_t (*callback)(mod_hash_key_t, mod_hash_val_t *, void *), void *arg)
{
	/* Read-locked wrapper around i_mod_hash_walk_nosync(). */
	rw_enter(&hash->mh_contents, RW_READER);
	i_mod_hash_walk_nosync(hash, callback, arg);
	rw_exit(&hash->mh_contents);
}
/*
 * i_mod_hash_clear_nosync()
 * mod_hash_clear()
 * 	Clears the given hash table by calling the destructor of every hash
 * 	element and freeing up all mod_hash_entry's.
 */
void
i_mod_hash_clear_nosync(mod_hash_t *hash)
{
	/* size_t index avoids the signed/unsigned mix against mh_nchains. */
	size_t i;
	struct mod_hash_entry *e, *old_e;

	for (i = 0; i < hash->mh_nchains; i++) {
		e = hash->mh_entries[i];
		while (e != NULL) {
			/* Destroy key and value, then recycle the entry. */
			MH_KEY_DESTROY(hash, e->mhe_key);
			MH_VAL_DESTROY(hash, e->mhe_val);
			old_e = e;
			e = e->mhe_next;
			kmem_cache_free(mh_e_cache, old_e);
		}
		hash->mh_entries[i] = NULL;
	}
	hash->mh_stat.mhs_nelems = 0;
}
void
mod_hash_clear(mod_hash_t *hash)
{
	/* Write-locked wrapper around i_mod_hash_clear_nosync(). */
	ASSERT(hash != NULL);

	rw_enter(&hash->mh_contents, RW_WRITER);
	i_mod_hash_clear_nosync(hash);
	rw_exit(&hash->mh_contents);
}

927
module/icp/spi/kcf_spi.c Normal file
View File

@ -0,0 +1,927 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* This file is part of the core Kernel Cryptographic Framework.
* It implements the SPI functions exported to cryptographic
* providers.
*/
#include <sys/zfs_context.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/sched_impl.h>
#include <sys/crypto/spi.h>
/*
 * minalloc and maxalloc values to be used for taskq_create().
 */
int crypto_taskq_threads = CRYPTO_TASKQ_THREADS;
/*
 * NOTE(review): "CYRPTO" is the macro's actual (misspelled) name as
 * referenced here -- it must be defined that way in the header for this to
 * compile.  Renaming it in this file alone would break the build; fix the
 * header and all users together if it is ever cleaned up.
 */
int crypto_taskq_minalloc = CYRPTO_TASKQ_MIN;
int crypto_taskq_maxalloc = CRYPTO_TASKQ_MAX;
/* Forward declarations for helpers used during (un)registration. */
static void remove_provider(kcf_provider_desc_t *);
static void process_logical_providers(crypto_provider_info_t *,
		kcf_provider_desc_t *);
static int init_prov_mechs(crypto_provider_info_t *, kcf_provider_desc_t *);
static int kcf_prov_kstat_update(kstat_t *, int);
static void delete_kstat(kcf_provider_desc_t *);
/* Template for the per-provider kstat counters installed at registration. */
static kcf_prov_stats_t kcf_stats_ks_data_template = {
	{ "kcf_ops_total", KSTAT_DATA_UINT64 },
	{ "kcf_ops_passed", KSTAT_DATA_UINT64 },
	{ "kcf_ops_failed", KSTAT_DATA_UINT64 },
	{ "kcf_ops_returned_busy", KSTAT_DATA_UINT64 }
};
/*
 * Copy the (src)->ops vector entry to dst when the provider supplied one.
 * Wrapped in do/while (0) so the macro expands to a single statement; the
 * previous bare-"if" form was a dangling-else hazard for callers.
 */
#define	KCF_SPI_COPY_OPS(src, dst, ops)	do {				\
	if ((src)->ops != NULL)						\
		*((dst)->ops) = *((src)->ops);				\
} while (0)

/*
 * Copy an ops vector from src to dst. Used during provider registration
 * to copy the ops vector from the provider info structure to the
 * provider descriptor maintained by KCF.
 * Copying the ops vector specified by the provider is needed since the
 * framework does not require the provider info structure to be
 * persistent.
 */
static void
copy_ops_vector_v1(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
{
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_control_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_digest_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_cipher_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_mac_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_sign_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_verify_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_dual_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_dual_cipher_mac_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_random_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_session_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_object_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_key_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_provider_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_ctx_ops);
}
/* SPI v2 additions: copy the mechanism-management ops vector entry. */
static void
copy_ops_vector_v2(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
{
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_mech_ops);
}
/* SPI v3 additions: copy the nostore-key ops vector entry. */
static void
copy_ops_vector_v3(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
{
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_nostore_key_ops);
}
/*
 * This routine is used to add cryptographic providers to the KEF framework.
 * Providers pass a crypto_provider_info structure to crypto_register_provider()
 * and get back a handle. The crypto_provider_info structure contains a
 * list of mechanisms supported by the provider and an ops vector containing
 * provider entry points. Hardware providers call this routine in their attach
 * routines. Software providers call this routine in their _init() routine.
 *
 * On success returns CRYPTO_SUCCESS and stores the new provider handle in
 * *handle; on failure returns a CRYPTO_* error and any partial registration
 * is undone via undo_register_provider().
 */
int
crypto_register_provider(crypto_provider_info_t *info,
    crypto_kcf_provider_handle_t *handle)
{
	char ks_name[KSTAT_STRLEN];
	kcf_provider_desc_t *prov_desc = NULL;
	int ret = CRYPTO_ARGUMENTS_BAD;
	/* Reject SPI versions newer than this framework understands. */
	if (info->pi_interface_version > CRYPTO_SPI_VERSION_3)
		return (CRYPTO_VERSION_MISMATCH);
	/*
	 * Check provider type, must be software, hardware, or logical.
	 */
	if (info->pi_provider_type != CRYPTO_HW_PROVIDER &&
	    info->pi_provider_type != CRYPTO_SW_PROVIDER &&
	    info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER)
		return (CRYPTO_ARGUMENTS_BAD);
	/*
	 * Allocate and initialize a new provider descriptor. We also
	 * hold it and release it when done.
	 */
	prov_desc = kcf_alloc_provider_desc(info);
	KCF_PROV_REFHOLD(prov_desc);
	prov_desc->pd_prov_type = info->pi_provider_type;
	/* provider-private handle, opaque to KCF */
	prov_desc->pd_prov_handle = info->pi_provider_handle;
	/* copy provider description string */
	if (info->pi_provider_description != NULL) {
		/*
		 * pi_provider_descriptor is a string that can contain
		 * up to CRYPTO_PROVIDER_DESCR_MAX_LEN + 1 characters
		 * INCLUDING the terminating null character. A bcopy()
		 * is necessary here as pd_description should not have
		 * a null character. See comments in kcf_alloc_provider_desc()
		 * for details on pd_description field.
		 */
		bcopy(info->pi_provider_description, prov_desc->pd_description,
		    MIN(strlen(info->pi_provider_description),
		    (size_t)CRYPTO_PROVIDER_DESCR_MAX_LEN));
	}
	/* Logical providers carry no ops vector of their own. */
	if (info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER) {
		if (info->pi_ops_vector == NULL) {
			goto bail;
		}
		/* v1 ops are mandatory; v2/v3 ops depend on the SPI version. */
		copy_ops_vector_v1(info->pi_ops_vector,
		    prov_desc->pd_ops_vector);
		if (info->pi_interface_version >= CRYPTO_SPI_VERSION_2) {
			copy_ops_vector_v2(info->pi_ops_vector,
			    prov_desc->pd_ops_vector);
			prov_desc->pd_flags = info->pi_flags;
		}
		if (info->pi_interface_version == CRYPTO_SPI_VERSION_3) {
			copy_ops_vector_v3(info->pi_ops_vector,
			    prov_desc->pd_ops_vector);
		}
	}
	/* object_ops and nostore_key_ops are mutually exclusive */
	if (prov_desc->pd_ops_vector->co_object_ops &&
	    prov_desc->pd_ops_vector->co_nostore_key_ops) {
		goto bail;
	}
	/* process the mechanisms supported by the provider */
	if ((ret = init_prov_mechs(info, prov_desc)) != CRYPTO_SUCCESS)
		goto bail;
	/*
	 * Add provider to providers tables, also sets the descriptor
	 * pd_prov_id field.
	 */
	if ((ret = kcf_prov_tab_add_provider(prov_desc)) != CRYPTO_SUCCESS) {
		undo_register_provider(prov_desc, B_FALSE);
		goto bail;
	}
	/*
	 * We create a taskq only for a hardware provider. The global
	 * software queue is used for software providers. We handle ordering
	 * of multi-part requests in the taskq routine. So, it is safe to
	 * have multiple threads for the taskq. We pass TASKQ_PREPOPULATE flag
	 * to keep some entries cached to improve performance.
	 */
	if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
		prov_desc->pd_sched_info.ks_taskq = taskq_create("kcf_taskq",
		    crypto_taskq_threads, minclsyspri,
		    crypto_taskq_minalloc, crypto_taskq_maxalloc,
		    TASKQ_PREPOPULATE);
	else
		prov_desc->pd_sched_info.ks_taskq = NULL;
	/* no kernel session to logical providers */
	if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
		/*
		 * Open a session for session-oriented providers. This session
		 * is used for all kernel consumers. This is fine as a provider
		 * is required to support multiple thread access to a session.
		 * We can do this only after the taskq has been created as we
		 * do a kcf_submit_request() to open the session.
		 */
		if (KCF_PROV_SESSION_OPS(prov_desc) != NULL) {
			kcf_req_params_t params;
			KCF_WRAP_SESSION_OPS_PARAMS(&params,
			    KCF_OP_SESSION_OPEN, &prov_desc->pd_sid, 0,
			    CRYPTO_USER, NULL, 0, prov_desc);
			ret = kcf_submit_request(prov_desc, NULL, NULL, &params,
			    B_FALSE);
			if (ret != CRYPTO_SUCCESS) {
				undo_register_provider(prov_desc, B_TRUE);
				ret = CRYPTO_FAILED;
				goto bail;
			}
		}
	}
	if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
		/*
		 * Create the kstat for this provider. There is a kstat
		 * installed for each successfully registered provider.
		 * This kstat is deleted, when the provider unregisters.
		 */
		if (prov_desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
			(void) snprintf(ks_name, KSTAT_STRLEN, "%s_%s",
			    "NONAME", "provider_stats");
		} else {
			(void) snprintf(ks_name, KSTAT_STRLEN, "%s_%d_%u_%s",
			    "NONAME", 0,
			    prov_desc->pd_prov_id, "provider_stats");
		}
		prov_desc->pd_kstat = kstat_create("kcf", 0, ks_name, "crypto",
		    KSTAT_TYPE_NAMED, sizeof (kcf_prov_stats_t) /
		    sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
		if (prov_desc->pd_kstat != NULL) {
			/*
			 * The kstat takes both a regular and an internal
			 * hold on the descriptor; presumably both are
			 * dropped by delete_kstat() at unregistration --
			 * confirm against delete_kstat()'s implementation.
			 */
			bcopy(&kcf_stats_ks_data_template,
			    &prov_desc->pd_ks_data,
			    sizeof (kcf_stats_ks_data_template));
			prov_desc->pd_kstat->ks_data = &prov_desc->pd_ks_data;
			KCF_PROV_REFHOLD(prov_desc);
			KCF_PROV_IREFHOLD(prov_desc);
			prov_desc->pd_kstat->ks_private = prov_desc;
			prov_desc->pd_kstat->ks_update = kcf_prov_kstat_update;
			kstat_install(prov_desc->pd_kstat);
		}
	}
	if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
		process_logical_providers(info, prov_desc);
	/* Mark the provider ready and notify interested consumers. */
	mutex_enter(&prov_desc->pd_lock);
	prov_desc->pd_state = KCF_PROV_READY;
	mutex_exit(&prov_desc->pd_lock);
	kcf_do_notify(prov_desc, B_TRUE);
	*handle = prov_desc->pd_kcf_prov_handle;
	ret = CRYPTO_SUCCESS;
bail:
	KCF_PROV_REFRELE(prov_desc);
	return (ret);
}
/*
 * This routine is used to notify the framework when a provider is being
 * removed. Hardware providers call this routine in their detach routines.
 * Software providers call this routine in their _fini() routine.
 *
 * Returns CRYPTO_SUCCESS when the provider is fully torn down, CRYPTO_BUSY
 * when it is still in use or being disabled by another thread, and
 * CRYPTO_UNKNOWN_PROVIDER for a stale handle.
 */
int
crypto_unregister_provider(crypto_kcf_provider_handle_t handle)
{
	uint_t mech_idx;
	kcf_provider_desc_t *desc;
	kcf_prov_state_t saved_state;
	/* lookup provider descriptor */
	if ((desc = kcf_prov_tab_lookup((crypto_provider_id_t)handle)) == NULL)
		return (CRYPTO_UNKNOWN_PROVIDER);
	mutex_enter(&desc->pd_lock);
	/*
	 * Check if any other thread is disabling or removing
	 * this provider. We return if this is the case.
	 */
	if (desc->pd_state >= KCF_PROV_DISABLED) {
		mutex_exit(&desc->pd_lock);
		/* Release reference held by kcf_prov_tab_lookup(). */
		KCF_PROV_REFRELE(desc);
		return (CRYPTO_BUSY);
	}
	/* Remember the prior state so a busy SW provider can be restored. */
	saved_state = desc->pd_state;
	desc->pd_state = KCF_PROV_REMOVED;
	if (saved_state == KCF_PROV_BUSY) {
		/*
		 * The per-provider taskq threads may be waiting. We
		 * signal them so that they can start failing requests.
		 */
		cv_broadcast(&desc->pd_resume_cv);
	}
	if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
		/*
		 * Check if this provider is currently being used.
		 * pd_irefcnt is the number of holds from the internal
		 * structures. We add one to account for the above lookup.
		 */
		if (desc->pd_refcnt > desc->pd_irefcnt + 1) {
			desc->pd_state = saved_state;
			mutex_exit(&desc->pd_lock);
			/* Release reference held by kcf_prov_tab_lookup(). */
			KCF_PROV_REFRELE(desc);
			/*
			 * The administrator presumably will stop the clients
			 * thus removing the holds, when they get the busy
			 * return value. Any retry will succeed then.
			 */
			return (CRYPTO_BUSY);
		}
	}
	mutex_exit(&desc->pd_lock);
	if (desc->pd_prov_type != CRYPTO_SW_PROVIDER) {
		remove_provider(desc);
	}
	if (desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
		/* remove the provider from the mechanisms tables */
		for (mech_idx = 0; mech_idx < desc->pd_mech_list_count;
		    mech_idx++) {
			kcf_remove_mech_provider(
			    desc->pd_mechanisms[mech_idx].cm_mech_name, desc);
		}
	}
	/* remove provider from providers table */
	if (kcf_prov_tab_rem_provider((crypto_provider_id_t)handle) !=
	    CRYPTO_SUCCESS) {
		/* Release reference held by kcf_prov_tab_lookup(). */
		KCF_PROV_REFRELE(desc);
		return (CRYPTO_UNKNOWN_PROVIDER);
	}
	delete_kstat(desc);
	if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
		/* Release reference held by kcf_prov_tab_lookup(). */
		KCF_PROV_REFRELE(desc);
		/*
		 * Wait till the existing requests complete.
		 *
		 * NOTE(review): pd_lock is taken after the REFRELE above;
		 * this relies on the descriptor remaining valid until the
		 * direct kcf_free_provider_desc() call below -- confirm
		 * against the scheduler's refcounting rules.
		 */
		mutex_enter(&desc->pd_lock);
		while (desc->pd_state != KCF_PROV_FREED)
			cv_wait(&desc->pd_remove_cv, &desc->pd_lock);
		mutex_exit(&desc->pd_lock);
	} else {
		/*
		 * Wait until requests that have been sent to the provider
		 * complete.
		 */
		mutex_enter(&desc->pd_lock);
		while (desc->pd_irefcnt > 0)
			cv_wait(&desc->pd_remove_cv, &desc->pd_lock);
		mutex_exit(&desc->pd_lock);
	}
	kcf_do_notify(desc, B_FALSE);
	if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
		/*
		 * This is the only place where kcf_free_provider_desc()
		 * is called directly. KCF_PROV_REFRELE() should free the
		 * structure in all other places.
		 */
		ASSERT(desc->pd_state == KCF_PROV_FREED &&
		    desc->pd_refcnt == 0);
		kcf_free_provider_desc(desc);
	} else {
		KCF_PROV_REFRELE(desc);
	}
	return (CRYPTO_SUCCESS);
}
/*
 * This routine is used to notify the framework that the state of
 * a cryptographic provider has changed. Valid state codes are:
 *
 * CRYPTO_PROVIDER_READY
 * 	The provider indicates that it can process more requests. A provider
 * 	will notify with this event if it previously has notified us with a
 * 	CRYPTO_PROVIDER_BUSY.
 *
 * CRYPTO_PROVIDER_BUSY
 * 	The provider can not take more requests.
 *
 * CRYPTO_PROVIDER_FAILED
 * 	The provider encountered an internal error. The framework will not
 * 	be sending any more requests to the provider. The provider may notify
 * 	with a CRYPTO_PROVIDER_READY, if it is able to recover from the error.
 *
 * This routine can be called from user or interrupt context.
 */
void
crypto_provider_notification(crypto_kcf_provider_handle_t handle, uint_t state)
{
	kcf_provider_desc_t *pd;
	/* lookup the provider from the given handle */
	if ((pd = kcf_prov_tab_lookup((crypto_provider_id_t)handle)) == NULL)
		return;
	mutex_enter(&pd->pd_lock);
	/* Ignore notifications from providers already failed or torn down. */
	if (pd->pd_state <= KCF_PROV_VERIFICATION_FAILED)
		goto out;
	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
		/*
		 * NOTE(review): "%x" formats the raw handle value; this
		 * assumes the handle type is no wider than unsigned int --
		 * confirm against crypto_kcf_provider_handle_t's definition.
		 */
		cmn_err(CE_WARN, "crypto_provider_notification: "
		    "logical provider (%x) ignored\n", handle);
		goto out;
	}
	/* State machine: only the transitions below are acted upon. */
	switch (state) {
	case CRYPTO_PROVIDER_READY:
		switch (pd->pd_state) {
		case KCF_PROV_BUSY:
			pd->pd_state = KCF_PROV_READY;
			/*
			 * Signal the per-provider taskq threads that they
			 * can start submitting requests.
			 */
			cv_broadcast(&pd->pd_resume_cv);
			break;
		case KCF_PROV_FAILED:
			/*
			 * The provider recovered from the error. Let us
			 * use it now.
			 */
			pd->pd_state = KCF_PROV_READY;
			break;
		default:
			break;
		}
		break;
	case CRYPTO_PROVIDER_BUSY:
		switch (pd->pd_state) {
		case KCF_PROV_READY:
			pd->pd_state = KCF_PROV_BUSY;
			break;
		default:
			break;
		}
		break;
	case CRYPTO_PROVIDER_FAILED:
		/*
		 * We note the failure and return. The per-provider taskq
		 * threads check this flag and start failing the
		 * requests, if it is set. See process_req_hwp() for details.
		 */
		switch (pd->pd_state) {
		case KCF_PROV_READY:
			pd->pd_state = KCF_PROV_FAILED;
			break;
		case KCF_PROV_BUSY:
			pd->pd_state = KCF_PROV_FAILED;
			/*
			 * The per-provider taskq threads may be waiting. We
			 * signal them so that they can start failing requests.
			 */
			cv_broadcast(&pd->pd_resume_cv);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
out:
	mutex_exit(&pd->pd_lock);
	/* Drop the reference taken by kcf_prov_tab_lookup(). */
	KCF_PROV_REFRELE(pd);
}
/*
 * This routine is used to notify the framework the result of
 * an asynchronous request handled by a provider. Valid error
 * codes are the same as the CRYPTO_* errors defined in common.h.
 *
 * This routine can be called from user or interrupt context.
 */
void
crypto_op_notification(crypto_req_handle_t handle, int error)
{
	kcf_call_type_t ctype;

	if (handle == NULL)
		return;

	ctype = GET_REQ_TYPE(handle);
	if (ctype == CRYPTO_SYNCH) {
		kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)handle;

		/* Count the failure, drop the internal hold, complete. */
		if (error != CRYPTO_SUCCESS)
			sreq->sn_provider->pd_sched_info.ks_nfails++;
		KCF_PROV_IREFRELE(sreq->sn_provider);
		kcf_sop_done(sreq, error);
	} else {
		kcf_areq_node_t *areq = (kcf_areq_node_t *)handle;

		ASSERT(ctype == CRYPTO_ASYNCH);
		if (error != CRYPTO_SUCCESS)
			areq->an_provider->pd_sched_info.ks_nfails++;
		KCF_PROV_IREFRELE(areq->an_provider);
		kcf_aop_done(areq, error);
	}
}
/*
 * This routine is used by software providers to determine
 * whether to use KM_SLEEP or KM_NOSLEEP during memory allocation.
 * Note that hardware providers can always use KM_SLEEP. So,
 * they do not need to call this routine.
 *
 * This routine can be called from user or interrupt context.
 */
int
crypto_kmflag(crypto_req_handle_t handle)
{
	/* Derive the KM_* flag from the request handle's call type. */
	return (REQHNDL2_KMFLAG(handle));
}
/*
 * Process the mechanism info structures specified by the provider
 * during registration. A NULL crypto_provider_info_t indicates
 * an already initialized provider descriptor.
 *
 * Mechanisms are not added to the kernel's mechanism table if the
 * provider is a logical provider.
 *
 * Returns CRYPTO_SUCCESS on success, CRYPTO_ARGUMENTS if one
 * of the specified mechanisms was malformed, or CRYPTO_HOST_MEMORY
 * if the table of mechanisms is full.
 */
static int
init_prov_mechs(crypto_provider_info_t *info, kcf_provider_desc_t *desc)
{
	uint_t mech_idx;
	uint_t cleanup_idx;
	int err = CRYPTO_SUCCESS;
	kcf_prov_mech_desc_t *pmd;
	/* Number of mechanisms this provider actually backs. */
	int desc_use_count = 0;
	int mcount = desc->pd_mech_list_count;
	/* Logical providers: copy the list but register no mechanisms. */
	if (desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
		if (info != NULL) {
			ASSERT(info->pi_mechanisms != NULL);
			bcopy(info->pi_mechanisms, desc->pd_mechanisms,
			    sizeof (crypto_mech_info_t) * mcount);
		}
		return (CRYPTO_SUCCESS);
	}
	/*
	 * Copy the mechanism list from the provider info to the provider
	 * descriptor. desc->pd_mechanisms has an extra crypto_mech_info_t
	 * element if the provider has random_ops since we keep an internal
	 * mechanism, SUN_RANDOM, in this case.
	 */
	if (info != NULL) {
		if (info->pi_ops_vector->co_random_ops != NULL) {
			crypto_mech_info_t *rand_mi;
			/*
			 * Need the following check as it is possible to have
			 * a provider that implements just random_ops and has
			 * pi_mechanisms == NULL.
			 */
			if (info->pi_mechanisms != NULL) {
				bcopy(info->pi_mechanisms, desc->pd_mechanisms,
				    sizeof (crypto_mech_info_t) * (mcount - 1));
			}
			/* Synthesize the internal SUN_RANDOM entry last. */
			rand_mi = &desc->pd_mechanisms[mcount - 1];
			bzero(rand_mi, sizeof (crypto_mech_info_t));
			(void) strncpy(rand_mi->cm_mech_name, SUN_RANDOM,
			    CRYPTO_MAX_MECH_NAME);
			rand_mi->cm_func_group_mask = CRYPTO_FG_RANDOM;
		} else {
			ASSERT(info->pi_mechanisms != NULL);
			bcopy(info->pi_mechanisms, desc->pd_mechanisms,
			    sizeof (crypto_mech_info_t) * mcount);
		}
	}
	/*
	 * For each mechanism support by the provider, add the provider
	 * to the corresponding KCF mechanism mech_entry chain.
	 */
	for (mech_idx = 0; mech_idx < desc->pd_mech_list_count; mech_idx++) {
		crypto_mech_info_t *mi = &desc->pd_mechanisms[mech_idx];
		/* Key-size units must be bits or bytes, never both. */
		if ((mi->cm_mech_flags & CRYPTO_KEYSIZE_UNIT_IN_BITS) &&
		    (mi->cm_mech_flags & CRYPTO_KEYSIZE_UNIT_IN_BYTES)) {
			err = CRYPTO_ARGUMENTS_BAD;
			break;
		}
		if (desc->pd_flags & CRYPTO_HASH_NO_UPDATE &&
		    mi->cm_func_group_mask & CRYPTO_FG_DIGEST) {
			/*
			 * We ask the provider to specify the limit
			 * per hash mechanism. But, in practice, a
			 * hardware limitation means all hash mechanisms
			 * will have the same maximum size allowed for
			 * input data. So, we make it a per provider
			 * limit to keep it simple.
			 */
			if (mi->cm_max_input_length == 0) {
				err = CRYPTO_ARGUMENTS_BAD;
				break;
			} else {
				desc->pd_hash_limit = mi->cm_max_input_length;
			}
		}
		if ((err = kcf_add_mech_provider(mech_idx, desc, &pmd)) !=
		    KCF_SUCCESS)
			break;
		/* A NULL pmd means the mechanism is disabled; skip it. */
		if (pmd == NULL)
			continue;
		/* The provider will be used for this mechanism */
		desc_use_count++;
	}
	/*
	 * Don't allow multiple software providers with disabled mechanisms
	 * to register. Subsequent enabling of mechanisms will result in
	 * an unsupported configuration, i.e. multiple software providers
	 * per mechanism.
	 */
	if (desc_use_count == 0 && desc->pd_prov_type == CRYPTO_SW_PROVIDER)
		return (CRYPTO_ARGUMENTS_BAD);
	if (err == KCF_SUCCESS)
		return (CRYPTO_SUCCESS);
	/*
	 * An error occurred while adding the mechanism, cleanup
	 * and bail.
	 */
	for (cleanup_idx = 0; cleanup_idx < mech_idx; cleanup_idx++) {
		kcf_remove_mech_provider(
		    desc->pd_mechanisms[cleanup_idx].cm_mech_name, desc);
	}
	if (err == KCF_MECH_TAB_FULL)
		return (CRYPTO_HOST_MEMORY);
	return (CRYPTO_ARGUMENTS_BAD);
}
/*
 * Update routine for kstat. Only privileged users are allowed to
 * access this information, since this information is sensitive.
 * There are some cryptographic attacks (e.g. traffic analysis)
 * which can use this information.
 */
static int
kcf_prov_kstat_update(kstat_t *ksp, int rw)
{
	kcf_provider_desc_t *pd = (kcf_provider_desc_t *)ksp->ks_private;
	kcf_prov_stats_t *stats;

	/* The counters are read-only; reject attempts to write them. */
	if (rw == KSTAT_WRITE)
		return (EACCES);

	stats = ksp->ks_data;
	stats->ps_ops_total.value.ui64 = pd->pd_sched_info.ks_ndispatches;
	stats->ps_ops_failed.value.ui64 = pd->pd_sched_info.ks_nfails;
	stats->ps_ops_busy_rval.value.ui64 = pd->pd_sched_info.ks_nbusy_rval;
	/* "passed" = dispatched minus failed minus busy-returned. */
	stats->ps_ops_passed.value.ui64 =
	    pd->pd_sched_info.ks_ndispatches -
	    pd->pd_sched_info.ks_nfails -
	    pd->pd_sched_info.ks_nbusy_rval;

	return (0);
}
/*
 * Utility routine called from failure paths in crypto_register_provider()
 * and from crypto_load_soft_disabled(). Detaches the provider from every
 * KCF mechanism table entry it registered, and optionally removes it
 * from the providers table as well.
 */
void
undo_register_provider(kcf_provider_desc_t *desc, boolean_t remove_prov)
{
	uint_t idx;

	/* Unhook the provider from each mechanism it supports. */
	for (idx = 0; idx < desc->pd_mech_list_count; idx++) {
		kcf_remove_mech_provider(
		    desc->pd_mechanisms[idx].cm_mech_name, desc);
	}

	/* Drop the provider's entry in the providers table, if requested. */
	if (remove_prov)
		(void) kcf_prov_tab_rem_provider(desc->pd_prov_id);
}
/*
 * Utility routine called from crypto_load_soft_disabled(). Callers
 * should have done a prior undo_register_provider().
 */
void
redo_register_provider(kcf_provider_desc_t *pd)
{
	/* Re-add the provider's mechanisms to the KCF mechanism tables. */
	(void) init_prov_mechs(NULL, pd);

	/*
	 * Take the references that account for the provider's presence
	 * in the providers table. kcf_prov_tab_add_provider() must not
	 * be called here: the descriptor is still valid, which means it
	 * already has an entry in that table.
	 */
	KCF_PROV_REFHOLD(pd);
	KCF_PROV_IREFHOLD(pd);
}
/*
 * Add provider (p1) to another provider's array of providers (p2).
 * Hardware and logical providers use this array to cross-reference
 * each other.
 */
static void
add_provider_to_array(kcf_provider_desc_t *p1, kcf_provider_desc_t *p2)
{
	kcf_provider_list_t *entry;

	entry = kmem_alloc(sizeof (kcf_provider_list_t), KM_SLEEP);

	mutex_enter(&p2->pd_lock);
	/* Push the new entry onto the head of p2's provider list. */
	entry->pl_next = p2->pd_provider_list;
	p2->pd_provider_list = entry;
	/* The list entry holds an internal reference on p1. */
	KCF_PROV_IREFHOLD(p1);
	entry->pl_provider = p1;
	mutex_exit(&p2->pd_lock);
}
/*
 * Remove provider (p1) from another provider's array of providers (p2).
 * Hardware and logical providers use this array to cross-reference
 * each other. If p1 is not on p2's list, this is a no-op.
 */
static void
remove_provider_from_array(kcf_provider_desc_t *p1, kcf_provider_desc_t *p2)
{
	kcf_provider_list_t *pl = NULL, **prev;

	mutex_enter(&p2->pd_lock);
	/* Walk p2's list looking for p1, tracking the back link in prev. */
	for (pl = p2->pd_provider_list, prev = &p2->pd_provider_list;
	    pl != NULL; prev = &pl->pl_next, pl = pl->pl_next) {
		if (pl->pl_provider == p1) {
			break;
		}
	}

	/*
	 * Bail out if p1 was not found. The check must be on pl, not p1:
	 * a non-NULL p1 that is simply absent from the list also leaves
	 * pl == NULL here, and falling through would dereference it.
	 */
	if (pl == NULL) {
		mutex_exit(&p2->pd_lock);
		return;
	}

	/* detach and free kcf_provider_list structure */
	KCF_PROV_IREFRELE(p1);
	*prev = pl->pl_next;
	kmem_free(pl, sizeof (*pl));
	mutex_exit(&p2->pd_lock);
}
/*
 * Convert an array of logical provider handles (crypto_provider_id)
 * stored in a crypto_provider_info structure into an array of provider
 * descriptors (kcf_provider_desc_t) attached to a logical provider.
 */
static void
process_logical_providers(crypto_provider_info_t *info, kcf_provider_desc_t *hp)
{
	int nlogical = info->pi_logical_provider_count;
	int idx;

	/* Cross-link the hardware provider with each logical provider. */
	for (idx = 0; idx < nlogical; idx++) {
		crypto_provider_id_t handle = info->pi_logical_providers[idx];
		kcf_provider_desc_t *lp =
		    kcf_prov_tab_lookup((crypto_provider_id_t)handle);

		/* Skip handles that don't resolve to a descriptor. */
		if (lp == NULL)
			continue;

		add_provider_to_array(hp, lp);
		hp->pd_flags |= KCF_LPROV_MEMBER;

		/*
		 * The hardware provider must keep the descriptor of
		 * every logical provider it belongs to, so it can be
		 * removed from each of them if it later unregisters
		 * from the framework.
		 */
		add_provider_to_array(lp, hp);
		KCF_PROV_REFRELE(lp);
	}
}
/*
 * This routine removes a provider from all of the logical or
 * hardware providers it belongs to, and frees the provider's
 * array of pointers to providers.
 */
static void
remove_provider(kcf_provider_desc_t *pp)
{
	kcf_provider_list_t *entry, *nextentry;

	mutex_enter(&pp->pd_lock);
	for (entry = pp->pd_provider_list; entry != NULL; entry = nextentry) {
		kcf_provider_desc_t *peer = entry->pl_provider;

		/* Drop pp from the peer's own cross-reference list. */
		remove_provider_from_array(pp, peer);

		/*
		 * A hardware provider whose cross-reference list is now
		 * empty no longer belongs to any logical provider.
		 */
		if (peer->pd_prov_type == CRYPTO_HW_PROVIDER &&
		    peer->pd_provider_list == NULL)
			peer->pd_flags &= ~KCF_LPROV_MEMBER;

		KCF_PROV_IREFRELE(peer);

		nextentry = entry->pl_next;
		kmem_free(entry, sizeof (*entry));
	}
	pp->pd_provider_list = NULL;
	mutex_exit(&pp->pd_lock);
}
/*
 * Dispatch events as needed for a provider. is_added flag tells
 * whether the provider is registering or unregistering.
 */
void
kcf_do_notify(kcf_provider_desc_t *prov_desc, boolean_t is_added)
{
	crypto_notify_event_change_t ec;
	int idx;

	ASSERT(prov_desc->pd_state > KCF_PROV_VERIFICATION_FAILED);

	/*
	 * Tell interested clients which mechanisms are becoming
	 * available or unavailable. Logical providers are skipped,
	 * as they do not affect mechanisms.
	 */
	if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
		ec.ec_provider_type = prov_desc->pd_prov_type;
		ec.ec_change = is_added ? CRYPTO_MECH_ADDED :
		    CRYPTO_MECH_REMOVED;
		/* One MECHS_CHANGED event per supported mechanism. */
		for (idx = 0; idx < prov_desc->pd_mech_list_count; idx++) {
			(void) strncpy(ec.ec_mech_name,
			    prov_desc->pd_mechanisms[idx].cm_mech_name,
			    CRYPTO_MAX_MECH_NAME);
			kcf_walk_ntfylist(CRYPTO_EVENT_MECHS_CHANGED, &ec);
		}
	}

	/*
	 * Announce the new or departing provider itself. For a logical
	 * provider, the event is sent only for the logical provider and
	 * not for the underlying member providers, which are identified
	 * by the KCF_LPROV_MEMBER bit.
	 */
	if (prov_desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER ||
	    (prov_desc->pd_flags & KCF_LPROV_MEMBER) == 0) {
		kcf_walk_ntfylist(is_added ? CRYPTO_EVENT_PROVIDER_REGISTERED :
		    CRYPTO_EVENT_PROVIDER_UNREGISTERED, prov_desc);
	}
}
/*
 * Destroy the kstat created for a provider, if one exists, and release
 * the references held on the provider by the kstat's ks_private field.
 */
static void
delete_kstat(kcf_provider_desc_t *desc)
{
	kcf_provider_desc_t *kspd;

	if (desc->pd_kstat == NULL)
		return;

	kspd = desc->pd_kstat->ks_private;
	/* ks_private must point back at the owning descriptor. */
	ASSERT(desc == kspd);

	kstat_delete(kspd->pd_kstat);
	desc->pd_kstat = NULL;

	/* release reference held by desc->pd_kstat->ks_private */
	KCF_PROV_REFRELE(kspd);
	KCF_PROV_IREFRELE(kspd);
}

View File

@ -12,7 +12,7 @@ if [ -f "${basedir}/../${SCRIPT_CONFIG}" ]; then
. "${basedir}/../${SCRIPT_CONFIG}"
else
KERNEL_MODULES=(zlib_deflate zlib_inflate)
MODULES=(spl splat zavl znvpair zunicode zcommon zfs)
MODULES=(spl splat zavl znvpair zunicode zcommon icp zfs)
fi
PROG="<define PROG>"

View File

@ -82,6 +82,7 @@ export ZFS_MODULES=( \
${MODDIR}/nvpair/znvpair.ko \
${MODDIR}/unicode/zunicode.ko \
${MODDIR}/zcommon/zcommon.ko \
${MODDIR}/icp/icp.ko \
${MODDIR}/zfs/zfs.ko \
)