diff --git a/Configure b/Configure index 43a90c0..5b4b08d 100755 --- a/Configure +++ b/Configure @@ -124,11 +124,11 @@ my $tlib="-lnsl -lsocket"; my $bits1="THIRTY_TWO_BIT "; my $bits2="SIXTY_FOUR_BIT "; -my $x86_asm="x86cpuid.o:bn-586.o co-586.o x86-mont.o x86-gf2m.o:des-586.o crypt586.o:aes-586.o vpaes-x86.o aesni-x86.o:bf-586.o:md5-586.o:sha1-586.o sha256-586.o sha512-586.o:cast-586.o:rc4-586.o:rmd-586.o:rc5-586.o:wp_block.o wp-mmx.o:cmll-x86.o:ghash-x86.o:"; +my $x86_asm="x86cpuid.o:bn-586.o co-586.o x86-mont.o x86-gf2m.o:des-586.o crypt586.o:aes-586.o vpaes-x86.o aesni-x86.o:bf-586.o:md5-586.o:sha1-586.o sha256-586.o sha512-586.o:cast-586.o:rc4-586.o:rmd-586.o:rc5-586.o:wp_block.o wp-mmx.o:cmll-x86.o:ghash-x86.o:e_padlock-x86.o"; my $x86_elf_asm="$x86_asm:elf"; -my $x86_64_asm="x86_64cpuid.o:x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o::aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o::md5-x86_64.o:sha1-x86_64.o sha256-x86_64.o sha512-x86_64.o::rc4-x86_64.o rc4-md5-x86_64.o:::wp-x86_64.o:cmll-x86_64.o cmll_misc.o:ghash-x86_64.o:"; +my $x86_64_asm="x86_64cpuid.o:x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o::aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o::md5-x86_64.o:sha1-x86_64.o sha256-x86_64.o sha512-x86_64.o::rc4-x86_64.o rc4-md5-x86_64.o:::wp-x86_64.o:cmll-x86_64.o cmll_misc.o:ghash-x86_64.o:e_padlock-x86_64.o"; my $ia64_asm="ia64cpuid.o:bn-ia64.o ia64-mont.o::aes_core.o aes_cbc.o aes-ia64.o::md5-ia64.o:sha1-ia64.o sha256-ia64.o sha512-ia64.o::rc4-ia64.o rc4_skey.o:::::ghash-ia64.o::void"; my $sparcv9_asm="sparcv9cap.o sparccpuid.o:bn-sparcv9.o sparcv9-mont.o sparcv9a-mont.o:des_enc-sparc.o fcrypt_b.o:aes_core.o aes_cbc.o aes-sparcv9.o:::sha1-sparcv9.o sha256-sparcv9.o sha512-sparcv9.o:::::::ghash-sparcv9.o::void"; my $sparcv8_asm=":sparcv8.o:des_enc-sparc.o fcrypt_b.o:::::::::::::void"; @@ -191,7 +191,7 @@ my %table=( "debug-linux-ppro","gcc:-DBN_DEBUG -DREF_CHECK -DCONF_DEBUG -DBN_CTX_DEBUG -DCRYPTO_MDEBUG -DL_ENDIAN -DTERMIO -g -mcpu=pentiumpro -Wall::-D_REENTRANT::-ldl:BN_LLONG ${x86_gcc_des} ${x86_gcc_opts}:${x86_elf_asm}:dlfcn", "debug-linux-elf","gcc:-DBN_DEBUG -DREF_CHECK -DCONF_DEBUG -DBN_CTX_DEBUG -DCRYPTO_MDEBUG -DL_ENDIAN -DTERMIO -g -march=i486 -Wall::-D_REENTRANT::-lefence -ldl:BN_LLONG ${x86_gcc_des} ${x86_gcc_opts}:${x86_elf_asm}:dlfcn:linux-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", "debug-linux-elf-noefence","gcc:-DBN_DEBUG -DREF_CHECK -DCONF_DEBUG -DBN_CTX_DEBUG -DCRYPTO_MDEBUG -DL_ENDIAN -DTERMIO -g -march=i486 -Wall::-D_REENTRANT::-ldl:BN_LLONG ${x86_gcc_des} ${x86_gcc_opts}:${x86_elf_asm}:dlfcn:linux-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", -"debug-linux-ia32-aes", "gcc:-DAES_EXPERIMENTAL -DL_ENDIAN -DTERMIO -O3 -fomit-frame-pointer -Wall::-D_REENTRANT::-ldl:BN_LLONG ${x86_gcc_des} ${x86_gcc_opts}:x86cpuid.o:bn-586.o co-586.o x86-mont.o:des-586.o crypt586.o:aes_x86core.o aes_cbc.o aesni-x86.o:bf-586.o:md5-586.o:sha1-586.o sha256-586.o sha512-586.o:cast-586.o:rc4-586.o:rmd-586.o:rc5-586.o:wp_block.o wp-mmx.o::ghash-x86.o::elf:dlfcn:linux-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", +"debug-linux-ia32-aes", "gcc:-DAES_EXPERIMENTAL -DL_ENDIAN -DTERMIO -O3 -fomit-frame-pointer -Wall::-D_REENTRANT::-ldl:BN_LLONG ${x86_gcc_des} ${x86_gcc_opts}:x86cpuid.o:bn-586.o co-586.o x86-mont.o:des-586.o crypt586.o:aes_x86core.o aes_cbc.o aesni-x86.o:bf-586.o:md5-586.o:sha1-586.o sha256-586.o 
sha512-586.o:cast-586.o:rc4-586.o:rmd-586.o:rc5-586.o:wp_block.o wp-mmx.o::ghash-x86.o:e_padlock-x86.o:elf:dlfcn:linux-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", "debug-linux-generic32","gcc:-DBN_DEBUG -DREF_CHECK -DCONF_DEBUG -DCRYPTO_MDEBUG -DTERMIO -g -Wall::-D_REENTRANT::-ldl:BN_LLONG RC4_CHAR RC4_CHUNK DES_INT DES_UNROLL BF_PTR:${no_asm}:dlfcn:linux-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", "debug-linux-generic64","gcc:-DBN_DEBUG -DREF_CHECK -DCONF_DEBUG -DCRYPTO_MDEBUG -DTERMIO -g -Wall::-D_REENTRANT::-ldl:SIXTY_FOUR_BIT_LONG RC4_CHAR RC4_CHUNK DES_INT DES_UNROLL BF_PTR:${no_asm}:dlfcn:linux-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", "debug-linux-x86_64","gcc:-DBN_DEBUG -DREF_CHECK -DCONF_DEBUG -DCRYPTO_MDEBUG -m64 -DL_ENDIAN -DTERMIO -g -Wall::-D_REENTRANT::-ldl:SIXTY_FOUR_BIT_LONG RC4_CHUNK DES_INT DES_UNROLL:${x86_64_asm}:elf:dlfcn:linux-shared:-fPIC:-m64:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR):::64", @@ -1983,7 +1983,7 @@ BEGIN VALUE "ProductVersion", "$version\\0" // Optional: //VALUE "Comments", "\\0" - VALUE "LegalCopyright", "Copyright © 1998-2005 The OpenSSL Project. Copyright © 1995-1998 Eric A. Young, Tim J. Hudson. All rights reserved.\\0" + VALUE "LegalCopyright", "Copyright � 1998-2005 The OpenSSL Project. Copyright � 1995-1998 Eric A. Young, Tim J. Hudson. All rights reserved.\\0" //VALUE "LegalTrademarks", "\\0" //VALUE "PrivateBuild", "\\0" //VALUE "SpecialBuild", "\\0" diff --git a/engines/Makefile b/engines/Makefile index 2fa9534..2e3d0fe 100644 --- a/engines/Makefile +++ b/engines/Makefile @@ -16,10 +16,13 @@ RECURSIVE_MAKE= [ -z "$(ENGDIRS)" ] || for i in $(ENGDIRS) ; do \ $(MAKE) -e TOP=../.. DIR=$$i $$target ) || exit 1; \ done; +ENGINES_ASM_OBJ= + PEX_LIBS= EX_LIBS= CFLAGS= $(INCLUDES) $(CFLAG) +ASFLAGS= $(INCLUDES) $(ASFLAG) GENERAL=Makefile engines.com install.com engine_vector.mar TEST= @@ -49,7 +52,8 @@ LIBOBJ= e_4758cca.o \ e_sureware.o \ e_ubsec.o \ e_padlock.o \ - e_capi.o + e_capi.o \ + $(ENGINES_ASM_OBJ) SRC= $(LIBSRC) @@ -77,7 +81,7 @@ lib: $(LIBOBJ) set -e; \ for l in $(LIBNAMES); do \ $(MAKE) -f ../Makefile.shared -e \ - LIBNAME=$$l LIBEXTRAS=e_$$l.o \ + LIBNAME=$$l LIBEXTRAS="e_$$l*.o" \ LIBDEPS='-L.. -lcrypto $(EX_LIBS)' \ link_o.$(SHLIB_TARGET); \ done; \ @@ -87,6 +91,11 @@ lib: $(LIBOBJ) fi; \ touch lib +e_padlock-x86.s: asm/e_padlock-x86.pl + $(PERL) asm/e_padlock-x86.pl $(PERLASM_SCHEME) $(CFLAGS) $(PROCESSOR) > $@ +e_padlock-x86_64.s: asm/e_padlock-x86_64.pl + $(PERL) asm/e_padlock-x86_64.pl $(PERLASM_SCHEME) > $@ + subdirs: echo $(EDIRS) @target=all; $(RECURSIVE_MAKE) diff --git a/engines/asm/e_padlock-x86.pl b/engines/asm/e_padlock-x86.pl new file mode 100644 index 0000000..4148468 --- /dev/null +++ b/engines/asm/e_padlock-x86.pl @@ -0,0 +1,606 @@ +#!/usr/bin/env perl + +# ==================================================================== +# Written by Andy Polyakov for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== + +# September 2011 +# +# Assembler helpers for Padlock engine. Compared to original engine +# version relying on inline assembler and compiled with gcc 3.4.6 it +# was measured to provide ~100% improvement on misaligned data in ECB +# mode and ~75% in CBC mode. For aligned data improvement can be +# observed for short inputs only, e.g. 
45% for 64-byte messages in +# ECB mode, 20% in CBC. Difference in performance for aligned vs. +# misaligned data depends on misalignment and is either ~1.8x or 2.9x. +# These are approximately same factors as for hardware support, so +# there is little reason to rely on the latter. On the contrary, it +# might actually hurt performance in mixture of aligned and misaligned +# buffers, because a) if you choose to flip 'align' flag in control +# word on per-buffer basis, then you'd have to reload key context, +# which incurs penalty; b) if you choose to set 'align' flag +# permanently, it limits performance even for aligned data to ~1/2. +# All above mentioned results were collected on 1.5GHz C7. Nano on the +# other hand handles unaligned data more gracefully. Depending on +# algorithm and how unaligned data is, hardware can be up to 70% more +# efficient than below software alignment procedures, nor does 'align' +# flag have affect on aligned performance [if has any meaning at all]. +# Therefore suggestion is to unconditionally set 'align' flag on Nano +# for optimal performance. + +$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; +push(@INC,"${dir}","${dir}../../crypto/perlasm"); +require "x86asm.pl"; + +&asm_init($ARGV[0],$0); + +%PADLOCK_PREFETCH=(ecb=>128, cbc=>64); # prefetch errata +$PADLOCK_CHUNK=512; # Must be a power of 2 larger than 16 + +$ctx="edx"; +$out="edi"; +$inp="esi"; +$len="ecx"; +$chunk="ebx"; + +&function_begin_B("padlock_capability"); + &push ("ebx"); + &pushf (); + &pop ("eax"); + &mov ("ecx","eax"); + &xor ("eax",1<<21); + &push ("eax"); + &popf (); + &pushf (); + &pop ("eax"); + &xor ("ecx","eax"); + &xor ("eax","eax"); + &bt ("ecx",21); + &jnc (&label("noluck")); + &cpuid (); + &xor ("eax","eax"); + &cmp ("ebx","0x".unpack("H*",'tneC')); + &jne (&label("noluck")); + &cmp ("edx","0x".unpack("H*",'Hrua')); + &jne (&label("noluck")); + &cmp ("ecx","0x".unpack("H*",'slua')); + &jne (&label("noluck")); + &mov ("eax",0xC0000000); + &cpuid (); + &mov ("edx","eax"); + &xor ("eax","eax"); + &cmp ("edx",0xC0000001); + &jb (&label("noluck")); + &mov ("eax",1); + &cpuid (); + &or ("eax",0x0f); + &xor ("ebx","ebx"); + &and ("eax",0x0fff); + &cmp ("eax",0x06ff); # check for Nano + &sete ("bl"); + &mov ("eax",0xC0000001); + &push ("ebx"); + &cpuid (); + &pop ("ebx"); + &mov ("eax","edx"); + &shl ("ebx",4); # bit#4 denotes Nano + &and ("eax",0xffffffef); + &or ("eax","ebx") +&set_label("noluck"); + &pop ("ebx"); + &ret (); +&function_end_B("padlock_capability") + +&function_begin_B("padlock_key_bswap"); + &mov ("edx",&wparam(0)); + &mov ("ecx",&DWP(240,"edx")); +&set_label("bswap_loop"); + &mov ("eax",&DWP(0,"edx")); + &bswap ("eax"); + &mov (&DWP(0,"edx"),"eax"); + &lea ("edx",&DWP(4,"edx")); + &sub ("ecx",1); + &jnz (&label("bswap_loop")); + &ret (); +&function_end_B("padlock_key_bswap"); + +# This is heuristic key context tracing. At first one +# believes that one should use atomic swap instructions, +# but it's not actually necessary. Point is that if +# padlock_saved_context was changed by another thread +# after we've read it and before we compare it with ctx, +# our key *shall* be reloaded upon thread context switch +# and we are therefore set in either case... +&static_label("padlock_saved_context"); + +&function_begin_B("padlock_verify_context"); + &mov ($ctx,&wparam(0)); + &lea ("eax",($::win32 or $::coff) ? 
&DWP(&label("padlock_saved_context")) : + &DWP(&label("padlock_saved_context")."-".&label("verify_pic_point"))); + &pushf (); + &call ("_padlock_verify_ctx"); +&set_label("verify_pic_point"); + &lea ("esp",&DWP(4,"esp")); + &ret (); +&function_end_B("padlock_verify_context"); + +&function_begin_B("_padlock_verify_ctx"); + &add ("eax",&DWP(0,"esp")) if(!($::win32 or $::coff));# &padlock_saved_context + &bt (&DWP(4,"esp"),30); # eflags + &jnc (&label("verified")); + &cmp ($ctx,&DWP(0,"eax")); + &je (&label("verified")); + &pushf (); + &popf (); +&set_label("verified"); + &mov (&DWP(0,"eax"),$ctx); + &ret (); +&function_end_B("_padlock_verify_ctx"); + +&function_begin_B("padlock_reload_key"); + &pushf (); + &popf (); + &ret (); +&function_end_B("padlock_reload_key"); + +&function_begin_B("padlock_aes_block"); + &push ("edi"); + &push ("esi"); + &push ("ebx"); + &mov ($out,&wparam(0)); # must be 16-byte aligned + &mov ($inp,&wparam(1)); # must be 16-byte aligned + &mov ($ctx,&wparam(2)); + &mov ($len,1); + &lea ("ebx",&DWP(32,$ctx)); # key + &lea ($ctx,&DWP(16,$ctx)); # control word + &data_byte(0xf3,0x0f,0xa7,0xc8); # rep xcryptecb + &pop ("ebx"); + &pop ("esi"); + &pop ("edi"); + &ret (); +&function_end_B("padlock_aes_block"); + +sub generate_mode { +my ($mode,$opcode) = @_; +# int padlock_$mode_encrypt(void *out, const void *inp, +# struct padlock_cipher_data *ctx, size_t len); +&function_begin("padlock_${mode}_encrypt"); + &mov ($out,&wparam(0)); + &mov ($inp,&wparam(1)); + &mov ($ctx,&wparam(2)); + &mov ($len,&wparam(3)); + &test ($ctx,15); + &jnz (&label("${mode}_abort")); + &test ($len,15); + &jnz (&label("${mode}_abort")); + &lea ("eax",($::win32 or $::coff) ? &DWP(&label("padlock_saved_context")) : + &DWP(&label("padlock_saved_context")."-".&label("${mode}_pic_point"))); + &pushf (); + &cld (); + &call ("_padlock_verify_ctx"); +&set_label("${mode}_pic_point"); + &lea ($ctx,&DWP(16,$ctx)); # control word + &xor ("eax","eax"); + if ($mode eq "ctr32") { + &movq ("mm0",&QWP(-16,$ctx)); # load [upper part of] counter + } else { + &xor ("ebx","ebx"); + &test (&DWP(0,$ctx),1<<5); # align bit in control word + &jnz (&label("${mode}_aligned")); + &test ($out,0x0f); + &setz ("al"); # !out_misaligned + &test ($inp,0x0f); + &setz ("bl"); # !inp_misaligned + &test ("eax","ebx"); + &jnz (&label("${mode}_aligned")); + &neg ("eax"); + } + &mov ($chunk,$PADLOCK_CHUNK); + ¬ ("eax"); # out_misaligned?-1:0 + &lea ("ebp",&DWP(-24,"esp")); + &cmp ($len,$chunk); + &cmovc ($chunk,$len); # chunk=len>PADLOCK_CHUNK?PADLOCK_CHUNK:len + &and ("eax",$chunk); # out_misaligned?chunk:0 + &mov ($chunk,$len); + &neg ("eax"); + &and ($chunk,$PADLOCK_CHUNK-1); # chunk=len%PADLOCK_CHUNK + &lea ("esp",&DWP(0,"eax","ebp")); # alloca + &mov ("eax",$PADLOCK_CHUNK); + &cmovz ($chunk,"eax"); # chunk=chunk?:PADLOCK_CHUNK + &mov ("eax","ebp"); + &and ("ebp",-16); + &and ("esp",-16); + &mov (&DWP(16,"ebp"),"eax"); + if ($PADLOCK_PREFETCH{$mode}) { + &cmp ($len,$chunk); + &ja (&label("${mode}_loop")); + &mov ("eax",$inp); # check if prefetch crosses page + &cmp ("ebp","esp"); + &cmove ("eax",$out); + &add ("eax",$len); + &neg ("eax"); + &and ("eax",0xfff); # distance to page boundary + &cmp ("eax",$PADLOCK_PREFETCH{$mode}); + &mov ("eax",-$PADLOCK_PREFETCH{$mode}); + &cmovae ("eax",$chunk); # mask=distanceExceptionCode == STATUS_ACCESS_VIOLATION + &jne (&label("ret")); + &add (&DWP(184,"ecx"),4); # skip over rep sha* + &mov ("eax",0); # ExceptionContinueExecution +&set_label("ret"); + &ret (); 
+&function_end_B("_win32_segv_handler"); +&safeseh("_win32_segv_handler") if ($::win32); + +&function_begin_B("padlock_sha1_oneshot"); + &push ("edi"); + &push ("esi"); + &xor ("eax","eax"); + &mov ("edi",&wparam(0)); + &mov ("esi",&wparam(1)); + &mov ("ecx",&wparam(2)); + if ($::win32 or $::coff) { + &push (&::islabel("_win32_segv_handler")); + &data_byte(0x64,0xff,0x30); # push %fs:(%eax) + &data_byte(0x64,0x89,0x20); # mov %esp,%fs:(%eax) + } + &mov ("edx","esp"); # put aside %esp + &add ("esp",-128); # 32 is enough but spec says 128 + &movups ("xmm0",&QWP(0,"edi")); # copy-in context + &and ("esp",-16); + &mov ("eax",&DWP(16,"edi")); + &movaps (&QWP(0,"esp"),"xmm0"); + &mov ("edi","esp"); + &mov (&DWP(16,"esp"),"eax"); + &xor ("eax","eax"); + &data_byte(0xf3,0x0f,0xa6,0xc8); # rep xsha1 + &movaps ("xmm0",&QWP(0,"esp")); + &mov ("eax",&DWP(16,"esp")); + &mov ("esp","edx"); # restore %esp + if ($::win32 or $::coff) { + &data_byte(0x64,0x8f,0x05,0,0,0,0); # pop %fs:0 + &lea ("esp",&DWP(4,"esp")); + } + &mov ("edi",&wparam(0)); + &movups (&QWP(0,"edi"),"xmm0"); # copy-out context + &mov (&DWP(16,"edi"),"eax"); + &pop ("esi"); + &pop ("edi"); + &ret (); +&function_end_B("padlock_sha1_oneshot"); + +&function_begin_B("padlock_sha1_blocks"); + &push ("edi"); + &push ("esi"); + &mov ("edi",&wparam(0)); + &mov ("esi",&wparam(1)); + &mov ("edx","esp"); # put aside %esp + &mov ("ecx",&wparam(2)); + &add ("esp",-128); + &movups ("xmm0",&QWP(0,"edi")); # copy-in context + &and ("esp",-16); + &mov ("eax",&DWP(16,"edi")); + &movaps (&QWP(0,"esp"),"xmm0"); + &mov ("edi","esp"); + &mov (&DWP(16,"esp"),"eax"); + &mov ("eax",-1); + &data_byte(0xf3,0x0f,0xa6,0xc8); # rep xsha1 + &movaps ("xmm0",&QWP(0,"esp")); + &mov ("eax",&DWP(16,"esp")); + &mov ("esp","edx"); # restore %esp + &mov ("edi",&wparam(0)); + &movups (&QWP(0,"edi"),"xmm0"); # copy-out context + &mov (&DWP(16,"edi"),"eax"); + &pop ("esi"); + &pop ("edi"); + &ret (); +&function_end_B("padlock_sha1_blocks"); + +&function_begin_B("padlock_sha256_oneshot"); + &push ("edi"); + &push ("esi"); + &xor ("eax","eax"); + &mov ("edi",&wparam(0)); + &mov ("esi",&wparam(1)); + &mov ("ecx",&wparam(2)); + if ($::win32 or $::coff) { + &push (&::islabel("_win32_segv_handler")); + &data_byte(0x64,0xff,0x30); # push %fs:(%eax) + &data_byte(0x64,0x89,0x20); # mov %esp,%fs:(%eax) + } + &mov ("edx","esp"); # put aside %esp + &add ("esp",-128); + &movups ("xmm0",&QWP(0,"edi")); # copy-in context + &and ("esp",-16); + &movups ("xmm1",&QWP(16,"edi")); + &movaps (&QWP(0,"esp"),"xmm0"); + &mov ("edi","esp"); + &movaps (&QWP(16,"esp"),"xmm1"); + &xor ("eax","eax"); + &data_byte(0xf3,0x0f,0xa6,0xd0); # rep xsha256 + &movaps ("xmm0",&QWP(0,"esp")); + &movaps ("xmm1",&QWP(16,"esp")); + &mov ("esp","edx"); # restore %esp + if ($::win32 or $::coff) { + &data_byte(0x64,0x8f,0x05,0,0,0,0); # pop %fs:0 + &lea ("esp",&DWP(4,"esp")); + } + &mov ("edi",&wparam(0)); + &movups (&QWP(0,"edi"),"xmm0"); # copy-out context + &movups (&QWP(16,"edi"),"xmm1"); + &pop ("esi"); + &pop ("edi"); + &ret (); +&function_end_B("padlock_sha256_oneshot"); + +&function_begin_B("padlock_sha256_blocks"); + &push ("edi"); + &push ("esi"); + &mov ("edi",&wparam(0)); + &mov ("esi",&wparam(1)); + &mov ("ecx",&wparam(2)); + &mov ("edx","esp"); # put aside %esp + &add ("esp",-128); + &movups ("xmm0",&QWP(0,"edi")); # copy-in context + &and ("esp",-16); + &movups ("xmm1",&QWP(16,"edi")); + &movaps (&QWP(0,"esp"),"xmm0"); + &mov ("edi","esp"); + &movaps (&QWP(16,"esp"),"xmm1"); + &mov ("eax",-1); + 
&data_byte(0xf3,0x0f,0xa6,0xd0); # rep xsha256 + &movaps ("xmm0",&QWP(0,"esp")); + &movaps ("xmm1",&QWP(16,"esp")); + &mov ("esp","edx"); # restore %esp + &mov ("edi",&wparam(0)); + &movups (&QWP(0,"edi"),"xmm0"); # copy-out context + &movups (&QWP(16,"edi"),"xmm1"); + &pop ("esi"); + &pop ("edi"); + &ret (); +&function_end_B("padlock_sha256_blocks"); + +&function_begin_B("padlock_sha512_blocks"); + &push ("edi"); + &push ("esi"); + &mov ("edi",&wparam(0)); + &mov ("esi",&wparam(1)); + &mov ("ecx",&wparam(2)); + &mov ("edx","esp"); # put aside %esp + &add ("esp",-128); + &movups ("xmm0",&QWP(0,"edi")); # copy-in context + &and ("esp",-16); + &movups ("xmm1",&QWP(16,"edi")); + &movups ("xmm2",&QWP(32,"edi")); + &movups ("xmm3",&QWP(48,"edi")); + &movaps (&QWP(0,"esp"),"xmm0"); + &mov ("edi","esp"); + &movaps (&QWP(16,"esp"),"xmm1"); + &movaps (&QWP(32,"esp"),"xmm2"); + &movaps (&QWP(48,"esp"),"xmm3"); + &data_byte(0xf3,0x0f,0xa6,0xe0); # rep xsha512 + &movaps ("xmm0",&QWP(0,"esp")); + &movaps ("xmm1",&QWP(16,"esp")); + &movaps ("xmm2",&QWP(32,"esp")); + &movaps ("xmm3",&QWP(48,"esp")); + &mov ("esp","edx"); # restore %esp + &mov ("edi",&wparam(0)); + &movups (&QWP(0,"edi"),"xmm0"); # copy-out context + &movups (&QWP(16,"edi"),"xmm1"); + &movups (&QWP(32,"edi"),"xmm2"); + &movups (&QWP(48,"edi"),"xmm3"); + &pop ("esi"); + &pop ("edi"); + &ret (); +&function_end_B("padlock_sha512_blocks"); + +&asciz ("VIA Padlock x86 module, CRYPTOGAMS by "); +&align (16); + +&dataseg(); +# Essentially this variable belongs in thread local storage. +# Having this variable global on the other hand can only cause +# few bogus key reloads [if any at all on signle-CPU system], +# so we accept the penalty... +&set_label("padlock_saved_context",4); +&data_word(0); + +&asm_finish(); diff --git a/engines/asm/e_padlock-x86_64.pl b/engines/asm/e_padlock-x86_64.pl new file mode 100644 index 0000000..297561a --- /dev/null +++ b/engines/asm/e_padlock-x86_64.pl @@ -0,0 +1,566 @@ +#!/usr/bin/env perl + +# ==================================================================== +# Written by Andy Polyakov for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== + +# September 2011 +# +# Assembler helpers for Padlock engine. See even e_padlock-x86.pl for +# details. 
+ +$flavour = shift; +$output = shift; +if ($flavour =~ /\./) { $output = $flavour; undef $flavour; } + +$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/); + +$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; +( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or +( $xlate="${dir}../../crypto/perlasm/x86_64-xlate.pl" and -f $xlate) or +die "can't locate x86_64-xlate.pl"; + +open STDOUT,"| $^X $xlate $flavour $output"; + +$code=".text\n"; + +%PADLOCK_PREFETCH=(ecb=>128, cbc=>64, ctr32=>32); # prefetch errata +$PADLOCK_CHUNK=512; # Must be a power of 2 between 32 and 2^20 + +$ctx="%rdx"; +$out="%rdi"; +$inp="%rsi"; +$len="%rcx"; +$chunk="%rbx"; + +($arg1,$arg2,$arg3,$arg4)=$win64?("%rcx","%rdx","%r8", "%r9") : # Win64 order + ("%rdi","%rsi","%rdx","%rcx"); # Unix order + +$code.=<<___; +.globl padlock_capability +.type padlock_capability,\@abi-omnipotent +.align 16 +padlock_capability: + mov %rbx,%r8 + xor %eax,%eax + cpuid + xor %eax,%eax + cmp \$`"0x".unpack("H*",'tneC')`,%ebx + jne .Lnoluck + cmp \$`"0x".unpack("H*",'Hrua')`,%edx + jne .Lnoluck + cmp \$`"0x".unpack("H*",'slua')`,%ecx + jne .Lnoluck + mov \$0xC0000000,%eax + cpuid + mov %eax,%edx + xor %eax,%eax + cmp \$0xC0000001,%edx + jb .Lnoluck + mov \$0xC0000001,%eax + cpuid + mov %edx,%eax + and \$0xffffffef,%eax + or \$0x10,%eax # set Nano bit#4 +.Lnoluck: + mov %r8,%rbx + ret +.size padlock_capability,.-padlock_capability + +.globl padlock_key_bswap +.type padlock_key_bswap,\@abi-omnipotent,0 +.align 16 +padlock_key_bswap: + mov 240($arg1),%edx +.Lbswap_loop: + mov ($arg1),%eax + bswap %eax + mov %eax,($arg1) + lea 4($arg1),$arg1 + sub \$1,%edx + jnz .Lbswap_loop + ret +.size padlock_key_bswap,.-padlock_key_bswap + +.globl padlock_verify_context +.type padlock_verify_context,\@abi-omnipotent +.align 16 +padlock_verify_context: + mov $arg1,$ctx + pushf + lea .Lpadlock_saved_context(%rip),%rax + call _padlock_verify_ctx + lea 8(%rsp),%rsp + ret +.size padlock_verify_context,.-padlock_verify_context + +.type _padlock_verify_ctx,\@abi-omnipotent +.align 16 +_padlock_verify_ctx: + mov 8(%rsp),%r8 + bt \$30,%r8 + jnc .Lverified + cmp (%rax),$ctx + je .Lverified + pushf + popf +.Lverified: + mov $ctx,(%rax) + ret +.size _padlock_verify_ctx,.-_padlock_verify_ctx + +.globl padlock_reload_key +.type padlock_reload_key,\@abi-omnipotent +.align 16 +padlock_reload_key: + pushf + popf + ret +.size padlock_reload_key,.-padlock_reload_key + +.globl padlock_aes_block +.type padlock_aes_block,\@function,3 +.align 16 +padlock_aes_block: + mov %rbx,%r8 + mov \$1,$len + lea 32($ctx),%rbx # key + lea 16($ctx),$ctx # control word + .byte 0xf3,0x0f,0xa7,0xc8 # rep xcryptecb + mov %r8,%rbx + ret +.size padlock_aes_block,.-padlock_aes_block + +.globl padlock_xstore +.type padlock_xstore,\@function,2 +.align 16 +padlock_xstore: + mov %esi,%edx + .byte 0x0f,0xa7,0xc0 # xstore + ret +.size padlock_xstore,.-padlock_xstore + +.globl padlock_sha1_oneshot +.type padlock_sha1_oneshot,\@function,3 +.align 16 +padlock_sha1_oneshot: + mov %rdx,%rcx + mov %rdi,%rdx # put aside %rdi + movups (%rdi),%xmm0 # copy-in context + sub \$128+8,%rsp + mov 16(%rdi),%eax + movaps %xmm0,(%rsp) + mov %rsp,%rdi + mov %eax,16(%rsp) + xor %rax,%rax + .byte 0xf3,0x0f,0xa6,0xc8 # rep xsha1 + movaps (%rsp),%xmm0 + mov 16(%rsp),%eax + add \$128+8,%rsp + movups %xmm0,(%rdx) # copy-out context + mov %eax,16(%rdx) + ret +.size padlock_sha1_oneshot,.-padlock_sha1_oneshot + +.globl padlock_sha1_blocks +.type padlock_sha1_blocks,\@function,3 +.align 16 +padlock_sha1_blocks: + mov 
%rdx,%rcx + mov %rdi,%rdx # put aside %rdi + movups (%rdi),%xmm0 # copy-in context + sub \$128+8,%rsp + mov 16(%rdi),%eax + movaps %xmm0,(%rsp) + mov %rsp,%rdi + mov %eax,16(%rsp) + mov \$-1,%rax + .byte 0xf3,0x0f,0xa6,0xc8 # rep xsha1 + movaps (%rsp),%xmm0 + mov 16(%rsp),%eax + add \$128+8,%rsp + movups %xmm0,(%rdx) # copy-out context + mov %eax,16(%rdx) + ret +.size padlock_sha1_blocks,.-padlock_sha1_blocks + +.globl padlock_sha256_oneshot +.type padlock_sha256_oneshot,\@function,3 +.align 16 +padlock_sha256_oneshot: + mov %rdx,%rcx + mov %rdi,%rdx # put aside %rdi + movups (%rdi),%xmm0 # copy-in context + sub \$128+8,%rsp + movups 16(%rdi),%xmm1 + movaps %xmm0,(%rsp) + mov %rsp,%rdi + movaps %xmm1,16(%rsp) + xor %rax,%rax + .byte 0xf3,0x0f,0xa6,0xd0 # rep xsha256 + movaps (%rsp),%xmm0 + movaps 16(%rsp),%xmm1 + add \$128+8,%rsp + movups %xmm0,(%rdx) # copy-out context + movups %xmm1,16(%rdx) + ret +.size padlock_sha256_oneshot,.-padlock_sha256_oneshot + +.globl padlock_sha256_blocks +.type padlock_sha256_blocks,\@function,3 +.align 16 +padlock_sha256_blocks: + mov %rdx,%rcx + mov %rdi,%rdx # put aside %rdi + movups (%rdi),%xmm0 # copy-in context + sub \$128+8,%rsp + movups 16(%rdi),%xmm1 + movaps %xmm0,(%rsp) + mov %rsp,%rdi + movaps %xmm1,16(%rsp) + mov \$-1,%rax + .byte 0xf3,0x0f,0xa6,0xd0 # rep xsha256 + movaps (%rsp),%xmm0 + movaps 16(%rsp),%xmm1 + add \$128+8,%rsp + movups %xmm0,(%rdx) # copy-out context + movups %xmm1,16(%rdx) + ret +.size padlock_sha256_blocks,.-padlock_sha256_blocks + +.globl padlock_sha512_blocks +.type padlock_sha512_blocks,\@function,3 +.align 16 +padlock_sha512_blocks: + mov %rdx,%rcx + mov %rdi,%rdx # put aside %rdi + movups (%rdi),%xmm0 # copy-in context + sub \$128+8,%rsp + movups 16(%rdi),%xmm1 + movups 32(%rdi),%xmm2 + movups 48(%rdi),%xmm3 + movaps %xmm0,(%rsp) + mov %rsp,%rdi + movaps %xmm1,16(%rsp) + movaps %xmm2,32(%rsp) + movaps %xmm3,48(%rsp) + .byte 0xf3,0x0f,0xa6,0xe0 # rep xha512 + movaps (%rsp),%xmm0 + movaps 16(%rsp),%xmm1 + movaps 32(%rsp),%xmm2 + movaps 48(%rsp),%xmm3 + add \$128+8,%rsp + movups %xmm0,(%rdx) # copy-out context + movups %xmm1,16(%rdx) + movups %xmm2,32(%rdx) + movups %xmm3,48(%rdx) + ret +.size padlock_sha512_blocks,.-padlock_sha512_blocks +___ + +sub generate_mode { +my ($mode,$opcode) = @_; +# int padlock_$mode_encrypt(void *out, const void *inp, +# struct padlock_cipher_data *ctx, size_t len); +$code.=<<___; +.globl padlock_${mode}_encrypt +.type padlock_${mode}_encrypt,\@function,4 +.align 16 +padlock_${mode}_encrypt: + push %rbp + push %rbx + + xor %eax,%eax + test \$15,$ctx + jnz .L${mode}_abort + test \$15,$len + jnz .L${mode}_abort + lea .Lpadlock_saved_context(%rip),%rax + pushf + cld + call _padlock_verify_ctx + lea 16($ctx),$ctx # control word + xor %eax,%eax + xor %ebx,%ebx + testl \$`1<<5`,($ctx) # align bit in control word + jnz .L${mode}_aligned + test \$0x0f,$out + setz %al # !out_misaligned + test \$0x0f,$inp + setz %bl # !inp_misaligned + test %ebx,%eax + jnz .L${mode}_aligned + neg %rax + mov \$$PADLOCK_CHUNK,$chunk + not %rax # out_misaligned?-1:0 + lea (%rsp),%rbp + cmp $chunk,$len + cmovc $len,$chunk # chunk=len>PADLOCK_CHUNK?PADLOCK_CHUNK:len + and $chunk,%rax # out_misaligned?chunk:0 + mov $len,$chunk + neg %rax + and \$$PADLOCK_CHUNK-1,$chunk # chunk%=PADLOCK_CHUNK + lea (%rax,%rbp),%rsp + mov \$$PADLOCK_CHUNK,%rax + cmovz %rax,$chunk # chunk=chunk?:PADLOCK_CHUNK +___ +$code.=<<___ if ($mode eq "ctr32"); +.L${mode}_reenter: + mov -4($ctx),%eax # pull 32-bit counter + bswap %eax + neg %eax + and 
\$`$PADLOCK_CHUNK/16-1`,%eax + mov \$$PADLOCK_CHUNK,$chunk + shl \$4,%eax + cmovz $chunk,%rax + cmp %rax,$len + cmova %rax,$chunk # don't let counter cross PADLOCK_CHUNK + cmovbe $len,$chunk +___ +$code.=<<___ if ($PADLOCK_PREFETCH{$mode}); + cmp $chunk,$len + ja .L${mode}_loop + mov $inp,%rax # check if prefetch crosses page + cmp %rsp,%rbp + cmove $out,%rax + add $len,%rax + neg %rax + and \$0xfff,%rax # distance to page boundary + cmp \$$PADLOCK_PREFETCH{$mode},%rax + mov \$-$PADLOCK_PREFETCH{$mode},%rax + cmovae $chunk,%rax # mask=distance" +.align 16 +.data +.align 8 +.Lpadlock_saved_context: + .quad 0 +___ +$code =~ s/\`([^\`]*)\`/eval($1)/gem; + +print $code; + +close STDOUT; diff --git a/engines/e_padlock.c b/engines/e_padlock.c index 9f7a85a..c71eeb5 100644 --- a/engines/e_padlock.c +++ b/engines/e_padlock.c @@ -76,6 +76,7 @@ #endif #include #include +#include #ifndef OPENSSL_NO_HW #ifndef OPENSSL_NO_HW_PADLOCK @@ -95,22 +96,22 @@ /* VIA PadLock AES is available *ONLY* on some x86 CPUs. Not only that it doesn't exist elsewhere, but it - even can't be compiled on other platforms! + even can't be compiled on other platforms! */ - In addition, because of the heavy use of inline assembler, - compiler choice is limited to GCC and Microsoft C. */ #undef COMPILE_HW_PADLOCK -#if !defined(I386_ONLY) && !defined(OPENSSL_NO_INLINE_ASM) -# if (defined(__GNUC__) && (defined(__i386__) || defined(__i386))) || \ - (defined(_MSC_VER) && defined(_M_IX86)) +#if !defined(I386_ONLY) && !defined(OPENSSL_NO_ASM) +# if defined(__i386__) || defined(__i386) || \ + defined(__x86_64__) || defined(__x86_64) || \ + defined(_M_IX86) || defined(_M_AMD64) || defined(_M_X64) || \ + defined(__INTEL__) # define COMPILE_HW_PADLOCK +# ifdef OPENSSL_NO_DYNAMIC_ENGINE +static ENGINE *ENGINE_padlock (void); +# endif # endif #endif #ifdef OPENSSL_NO_DYNAMIC_ENGINE -#ifdef COMPILE_HW_PADLOCK -static ENGINE *ENGINE_padlock (void); -#endif void ENGINE_load_padlock (void) { @@ -127,19 +128,6 @@ void ENGINE_load_padlock (void) #endif #ifdef COMPILE_HW_PADLOCK -/* We do these includes here to avoid header problems on platforms that - do not have the VIA padlock anyway... */ -#include -#ifdef _WIN32 -# include -# ifndef alloca -# define alloca _alloca -# endif -#elif defined(__GNUC__) -# ifndef alloca -# define alloca(s) __builtin_alloca(s) -# endif -#endif /* Function for ENGINE detection and control */ static int padlock_available(void); @@ -160,9 +148,6 @@ static char padlock_name[100]; /* Available features */ static int padlock_use_ace = 0; /* Advanced Cryptography Engine */ static int padlock_use_rng = 0; /* Random Number Generator */ -#ifndef OPENSSL_NO_AES -static int padlock_aes_align_required = 1; -#endif /* ===== Engine "management" functions ===== */ @@ -200,7 +185,6 @@ padlock_bind_helper(ENGINE *e) } #ifdef OPENSSL_NO_DYNAMIC_ENGINE - /* Constructor */ static ENGINE * ENGINE_padlock(void) @@ -218,7 +202,6 @@ ENGINE_padlock(void) return eng; } - #endif /* Check availability of the engine */ @@ -283,98 +266,37 @@ struct padlock_cipher_data } cword; /* Control word */ AES_KEY ks; /* Encryption key */ }; - -/* - * Essentially this variable belongs in thread local storage. - * Having this variable global on the other hand can only cause - * few bogus key reloads [if any at all on single-CPU system], - * so we accept the penatly... - */ -static volatile struct padlock_cipher_data *padlock_saved_context; #endif -/* - * ======================================================= - * Inline assembler section(s). 
- * ======================================================= - * Order of arguments is chosen to facilitate Windows port - * using __fastcall calling convention. If you wish to add - * more routines, keep in mind that first __fastcall - * argument is passed in %ecx and second - in %edx. - * ======================================================= - */ -#if defined(__GNUC__) && __GNUC__>=2 -/* - * As for excessive "push %ebx"/"pop %ebx" found all over. - * When generating position-independent code GCC won't let - * us use "b" in assembler templates nor even respect "ebx" - * in "clobber description." Therefore the trouble... - */ - -/* Helper function - check if a CPUID instruction - is available on this CPU */ -static int -padlock_insn_cpuid_available(void) -{ - int result = -1; - - /* We're checking if the bit #21 of EFLAGS - can be toggled. If yes = CPUID is available. */ - asm volatile ( - "pushf\n" - "popl %%eax\n" - "xorl $0x200000, %%eax\n" - "movl %%eax, %%ecx\n" - "andl $0x200000, %%ecx\n" - "pushl %%eax\n" - "popf\n" - "pushf\n" - "popl %%eax\n" - "andl $0x200000, %%eax\n" - "xorl %%eax, %%ecx\n" - "movl %%ecx, %0\n" - : "=r" (result) : : "eax", "ecx"); - - return (result == 0); -} +/* Interface to assembler module */ +unsigned int padlock_capability(); +void padlock_key_bswap(AES_KEY *key); +void padlock_verify_context(struct padlock_cipher_data *ctx); +void padlock_reload_key(); +void padlock_aes_block(void *out, const void *inp, + struct padlock_cipher_data *ctx); +int padlock_ecb_encrypt(void *out, const void *inp, + struct padlock_cipher_data *ctx, size_t len); +int padlock_cbc_encrypt(void *out, const void *inp, + struct padlock_cipher_data *ctx, size_t len); +int padlock_cfb_encrypt(void *out, const void *inp, + struct padlock_cipher_data *ctx, size_t len); +int padlock_ofb_encrypt(void *out, const void *inp, + struct padlock_cipher_data *ctx, size_t len); +int padlock_ctr32_encrypt(void *out, const void *inp, + struct padlock_cipher_data *ctx, size_t len); +int padlock_xstore(void *out,int edx); +void padlock_sha1_oneshot(void *ctx,const void *inp,size_t len); +void padlock_sha1(void *ctx,const void *inp,size_t len); +void padlock_sha256_oneshot(void *ctx,const void *inp,size_t len); +void padlock_sha256(void *ctx,const void *inp,size_t len); /* Load supported features of the CPU to see if the PadLock is available. */ static int padlock_available(void) { - char vendor_string[16]; - unsigned int eax, edx; - - /* First check if the CPUID instruction is available at all... */ - if (! padlock_insn_cpuid_available()) - return 0; - - /* Are we running on the Centaur (VIA) CPU? 
*/ - eax = 0x00000000; - vendor_string[12] = 0; - asm volatile ( - "pushl %%ebx\n" - "cpuid\n" - "movl %%ebx,(%%edi)\n" - "movl %%edx,4(%%edi)\n" - "movl %%ecx,8(%%edi)\n" - "popl %%ebx" - : "+a"(eax) : "D"(vendor_string) : "ecx", "edx"); - if (strcmp(vendor_string, "CentaurHauls") != 0) - return 0; - - /* Check for Centaur Extended Feature Flags presence */ - eax = 0xC0000000; - asm volatile ("pushl %%ebx; cpuid; popl %%ebx" - : "+a"(eax) : : "ecx", "edx"); - if (eax < 0xC0000001) - return 0; - - /* Read the Centaur Extended Feature Flags */ - eax = 0xC0000001; - asm volatile ("pushl %%ebx; cpuid; popl %%ebx" - : "+a"(eax), "=d"(edx) : : "ecx"); + unsigned int edx = padlock_capability(); /* Fill up some flags */ padlock_use_ace = ((edx & (0x3<<6)) == (0x3<<6)); @@ -383,254 +305,6 @@ padlock_available(void) return padlock_use_ace + padlock_use_rng; } -#ifndef OPENSSL_NO_AES -/* Our own htonl()/ntohl() */ -static inline void -padlock_bswapl(AES_KEY *ks) -{ - size_t i = sizeof(ks->rd_key)/sizeof(ks->rd_key[0]); - unsigned int *key = ks->rd_key; - - while (i--) { - asm volatile ("bswapl %0" : "+r"(*key)); - key++; - } -} -#endif - -/* Force key reload from memory to the CPU microcode. - Loading EFLAGS from the stack clears EFLAGS[30] - which does the trick. */ -static inline void -padlock_reload_key(void) -{ - asm volatile ("pushfl; popfl"); -} - -#ifndef OPENSSL_NO_AES -/* - * This is heuristic key context tracing. At first one - * believes that one should use atomic swap instructions, - * but it's not actually necessary. Point is that if - * padlock_saved_context was changed by another thread - * after we've read it and before we compare it with cdata, - * our key *shall* be reloaded upon thread context switch - * and we are therefore set in either case... - */ -static inline void -padlock_verify_context(struct padlock_cipher_data *cdata) -{ - asm volatile ( - "pushfl\n" -" btl $30,(%%esp)\n" -" jnc 1f\n" -" cmpl %2,%1\n" -" je 1f\n" -" popfl\n" -" subl $4,%%esp\n" -"1: addl $4,%%esp\n" -" movl %2,%0" - :"+m"(padlock_saved_context) - : "r"(padlock_saved_context), "r"(cdata) : "cc"); -} - -/* Template for padlock_xcrypt_* modes */ -/* BIG FAT WARNING: - * The offsets used with 'leal' instructions - * describe items of the 'padlock_cipher_data' - * structure. - */ -#define PADLOCK_XCRYPT_ASM(name,rep_xcrypt) \ -static inline void *name(size_t cnt, \ - struct padlock_cipher_data *cdata, \ - void *out, const void *inp) \ -{ void *iv; \ - asm volatile ( "pushl %%ebx\n" \ - " leal 16(%0),%%edx\n" \ - " leal 32(%0),%%ebx\n" \ - rep_xcrypt "\n" \ - " popl %%ebx" \ - : "=a"(iv), "=c"(cnt), "=D"(out), "=S"(inp) \ - : "0"(cdata), "1"(cnt), "2"(out), "3"(inp) \ - : "edx", "cc", "memory"); \ - return iv; \ -} - -/* Generate all functions with appropriate opcodes */ -PADLOCK_XCRYPT_ASM(padlock_xcrypt_ecb, ".byte 0xf3,0x0f,0xa7,0xc8") /* rep xcryptecb */ -PADLOCK_XCRYPT_ASM(padlock_xcrypt_cbc, ".byte 0xf3,0x0f,0xa7,0xd0") /* rep xcryptcbc */ -PADLOCK_XCRYPT_ASM(padlock_xcrypt_cfb, ".byte 0xf3,0x0f,0xa7,0xe0") /* rep xcryptcfb */ -PADLOCK_XCRYPT_ASM(padlock_xcrypt_ofb, ".byte 0xf3,0x0f,0xa7,0xe8") /* rep xcryptofb */ -#endif - -/* The RNG call itself */ -static inline unsigned int -padlock_xstore(void *addr, unsigned int edx_in) -{ - unsigned int eax_out; - - asm volatile (".byte 0x0f,0xa7,0xc0" /* xstore */ - : "=a"(eax_out),"=m"(*(unsigned *)addr) - : "D"(addr), "d" (edx_in) - ); - - return eax_out; -} - -/* Why not inline 'rep movsd'? 
I failed to find information on what - * value in Direction Flag one can expect and consequently have to - * apply "better-safe-than-sorry" approach and assume "undefined." - * I could explicitly clear it and restore the original value upon - * return from padlock_aes_cipher, but it's presumably too much - * trouble for too little gain... - * - * In case you wonder 'rep xcrypt*' instructions above are *not* - * affected by the Direction Flag and pointers advance toward - * larger addresses unconditionally. - */ -static inline unsigned char * -padlock_memcpy(void *dst,const void *src,size_t n) -{ - long *d=dst; - const long *s=src; - - n /= sizeof(*d); - do { *d++ = *s++; } while (--n); - - return dst; -} - -#elif defined(_MSC_VER) -/* - * Unlike GCC these are real functions. In order to minimize impact - * on performance we adhere to __fastcall calling convention in - * order to get two first arguments passed through %ecx and %edx. - * Which kind of suits very well, as instructions in question use - * both %ecx and %edx as input:-) - */ -#define REP_XCRYPT(code) \ - _asm _emit 0xf3 \ - _asm _emit 0x0f _asm _emit 0xa7 \ - _asm _emit code - -/* BIG FAT WARNING: - * The offsets used with 'lea' instructions - * describe items of the 'padlock_cipher_data' - * structure. - */ -#define PADLOCK_XCRYPT_ASM(name,code) \ -static void * __fastcall \ - name (size_t cnt, void *cdata, \ - void *outp, const void *inp) \ -{ _asm mov eax,edx \ - _asm lea edx,[eax+16] \ - _asm lea ebx,[eax+32] \ - _asm mov edi,outp \ - _asm mov esi,inp \ - REP_XCRYPT(code) \ -} - -PADLOCK_XCRYPT_ASM(padlock_xcrypt_ecb,0xc8) -PADLOCK_XCRYPT_ASM(padlock_xcrypt_cbc,0xd0) -PADLOCK_XCRYPT_ASM(padlock_xcrypt_cfb,0xe0) -PADLOCK_XCRYPT_ASM(padlock_xcrypt_ofb,0xe8) - -static int __fastcall -padlock_xstore(void *outp,unsigned int code) -{ _asm mov edi,ecx - _asm _emit 0x0f _asm _emit 0xa7 _asm _emit 0xc0 -} - -static void __fastcall -padlock_reload_key(void) -{ _asm pushfd _asm popfd } - -static void __fastcall -padlock_verify_context(void *cdata) -{ _asm { - pushfd - bt DWORD PTR[esp],30 - jnc skip - cmp ecx,padlock_saved_context - je skip - popfd - sub esp,4 - skip: add esp,4 - mov padlock_saved_context,ecx - } -} - -static int -padlock_available(void) -{ _asm { - pushfd - pop eax - mov ecx,eax - xor eax,1<<21 - push eax - popfd - pushfd - pop eax - xor eax,ecx - bt eax,21 - jnc noluck - mov eax,0 - cpuid - xor eax,eax - cmp ebx,'tneC' - jne noluck - cmp edx,'Hrua' - jne noluck - cmp ecx,'slua' - jne noluck - mov eax,0xC0000000 - cpuid - mov edx,eax - xor eax,eax - cmp edx,0xC0000001 - jb noluck - mov eax,0xC0000001 - cpuid - xor eax,eax - bt edx,6 - jnc skip_a - bt edx,7 - jnc skip_a - mov padlock_use_ace,1 - inc eax - skip_a: bt edx,2 - jnc skip_r - bt edx,3 - jnc skip_r - mov padlock_use_rng,1 - inc eax - skip_r: - noluck: - } -} - -static void __fastcall -padlock_bswapl(void *key) -{ _asm { - pushfd - cld - mov esi,ecx - mov edi,ecx - mov ecx,60 - up: lodsd - bswap eax - stosd - loop up - popfd - } -} - -/* MS actually specifies status of Direction Flag and compiler even - * manages to compile following as 'rep movsd' all by itself... 
- */ -#define padlock_memcpy(o,i,n) ((unsigned char *)memcpy((o),(i),(n)&~3U)) -#endif - /* ===== AES encryption/decryption ===== */ #ifndef OPENSSL_NO_AES @@ -664,16 +338,19 @@ static int padlock_cipher_nids[] = { NID_aes_128_cbc, NID_aes_128_cfb, NID_aes_128_ofb, + NID_aes_128_ctr, NID_aes_192_ecb, NID_aes_192_cbc, NID_aes_192_cfb, NID_aes_192_ofb, + NID_aes_192_ctr, NID_aes_256_ecb, NID_aes_256_cbc, NID_aes_256_cfb, NID_aes_256_ofb, + NID_aes_256_ctr }; static int padlock_cipher_nids_num = (sizeof(padlock_cipher_nids)/ sizeof(padlock_cipher_nids[0])); @@ -681,18 +358,186 @@ static int padlock_cipher_nids_num = (sizeof(padlock_cipher_nids)/ /* Function prototypes ... */ static int padlock_aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key, const unsigned char *iv, int enc); -static int padlock_aes_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, - const unsigned char *in, size_t nbytes); #define NEAREST_ALIGNED(ptr) ( (unsigned char *)(ptr) + \ ( (0x10 - ((size_t)(ptr) & 0x0F)) & 0x0F ) ) #define ALIGNED_CIPHER_DATA(ctx) ((struct padlock_cipher_data *)\ NEAREST_ALIGNED(ctx->cipher_data)) +static int +padlock_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg, + const unsigned char *in_arg, size_t nbytes) +{ + return padlock_ecb_encrypt(out_arg,in_arg, + ALIGNED_CIPHER_DATA(ctx),nbytes); +} +static int +padlock_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg, + const unsigned char *in_arg, size_t nbytes) +{ + struct padlock_cipher_data *cdata = ALIGNED_CIPHER_DATA(ctx); + int ret; + + memcpy(cdata->iv, ctx->iv, AES_BLOCK_SIZE); + if ((ret = padlock_cbc_encrypt(out_arg,in_arg,cdata,nbytes))) + memcpy(ctx->iv, cdata->iv, AES_BLOCK_SIZE); + return ret; +} + +static int +padlock_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg, + const unsigned char *in_arg, size_t nbytes) +{ + struct padlock_cipher_data *cdata = ALIGNED_CIPHER_DATA(ctx); + size_t chunk; + + if ((chunk = ctx->num)) { /* borrow chunk variable */ + unsigned char *ivp=ctx->iv; + + if (chunk >= AES_BLOCK_SIZE) + return 0; /* bogus value */ + + if (ctx->encrypt) + while (chunknum = chunk%AES_BLOCK_SIZE; + } + + if (nbytes == 0) + return 1; + + memcpy (cdata->iv, ctx->iv, AES_BLOCK_SIZE); + + if ((chunk = nbytes & ~(AES_BLOCK_SIZE-1))) { + if (!padlock_cfb_encrypt(out_arg,in_arg,cdata,chunk)) + return 0; + nbytes -= chunk; + } + + if (nbytes) { + unsigned char *ivp = cdata->iv; + + out_arg += chunk; + in_arg += chunk; + ctx->num = nbytes; + if (cdata->cword.b.encdec) { + cdata->cword.b.encdec=0; + padlock_reload_key(); + padlock_aes_block(ivp,ivp,cdata); + cdata->cword.b.encdec=1; + padlock_reload_key(); + while(nbytes) { + unsigned char c = *(in_arg++); + *(out_arg++) = c ^ *ivp; + *(ivp++) = c, nbytes--; + } + } + else { padlock_reload_key(); + padlock_aes_block(ivp,ivp,cdata); + padlock_reload_key(); + while (nbytes) { + *ivp = *(out_arg++) = *(in_arg++) ^ *ivp; + ivp++, nbytes--; + } + } + } + + memcpy(ctx->iv, cdata->iv, AES_BLOCK_SIZE); + + return 1; +} + +static int +padlock_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg, + const unsigned char *in_arg, size_t nbytes) +{ + struct padlock_cipher_data *cdata = ALIGNED_CIPHER_DATA(ctx); + size_t chunk; + + /* ctx->num is maintained in byte-oriented modes, + such as CFB and OFB... 
*/ + if ((chunk = ctx->num)) { /* borrow chunk variable */ + unsigned char *ivp=ctx->iv; + + if (chunk >= AES_BLOCK_SIZE) + return 0; /* bogus value */ + + while (chunknum = chunk%AES_BLOCK_SIZE; + } + + if (nbytes == 0) + return 1; + + memcpy(cdata->iv, ctx->iv, AES_BLOCK_SIZE); + + if ((chunk = nbytes & ~(AES_BLOCK_SIZE-1))) { + if (!padlock_ofb_encrypt(out_arg,in_arg,cdata,chunk)) + return 0; + nbytes -= chunk; + } + + if (nbytes) { + unsigned char *ivp = cdata->iv; + + out_arg += chunk; + in_arg += chunk; + ctx->num = nbytes; + padlock_reload_key(); /* empirically found */ + padlock_aes_block(ivp,ivp,cdata); + padlock_reload_key(); /* empirically found */ + while (nbytes) { + *(out_arg++) = *(in_arg++) ^ *ivp; + ivp++, nbytes--; + } + } + + memcpy(ctx->iv, cdata->iv, AES_BLOCK_SIZE); + + return 1; +} + +static void padlock_ctr32_encrypt_glue(const unsigned char *in, + unsigned char *out, size_t blocks, + struct padlock_cipher_data *ctx, + const unsigned char *ivec) +{ + memcpy(ctx->iv,ivec,AES_BLOCK_SIZE); + padlock_ctr32_encrypt(out,in,ctx,AES_BLOCK_SIZE*blocks); +} + +static int +padlock_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg, + const unsigned char *in_arg, size_t nbytes) +{ + struct padlock_cipher_data *cdata = ALIGNED_CIPHER_DATA(ctx); + unsigned int num = ctx->num; + + CRYPTO_ctr128_encrypt_ctr32(in_arg,out_arg,nbytes, + cdata,ctx->iv,ctx->buf,&num, + (ctr128_f)padlock_ctr32_encrypt_glue); + + ctx->num = (size_t)num; + return 1; +} + #define EVP_CIPHER_block_size_ECB AES_BLOCK_SIZE #define EVP_CIPHER_block_size_CBC AES_BLOCK_SIZE #define EVP_CIPHER_block_size_OFB 1 #define EVP_CIPHER_block_size_CFB 1 +#define EVP_CIPHER_block_size_CTR 1 /* Declaring so many ciphers by hand would be a pain. Instead introduce a bit of preprocessor magic :-) */ @@ -704,7 +549,7 @@ static const EVP_CIPHER padlock_aes_##ksize##_##lmode = { \ AES_BLOCK_SIZE, \ 0 | EVP_CIPH_##umode##_MODE, \ padlock_aes_init_key, \ - padlock_aes_cipher, \ + padlock_##lmode##_cipher, \ NULL, \ sizeof(struct padlock_cipher_data) + 16, \ EVP_CIPHER_set_asn1_iv, \ @@ -717,16 +562,19 @@ DECLARE_AES_EVP(128,ecb,ECB); DECLARE_AES_EVP(128,cbc,CBC); DECLARE_AES_EVP(128,cfb,CFB); DECLARE_AES_EVP(128,ofb,OFB); +DECLARE_AES_EVP(128,ctr,CTR); DECLARE_AES_EVP(192,ecb,ECB); DECLARE_AES_EVP(192,cbc,CBC); DECLARE_AES_EVP(192,cfb,CFB); DECLARE_AES_EVP(192,ofb,OFB); +DECLARE_AES_EVP(192,ctr,CTR); DECLARE_AES_EVP(256,ecb,ECB); DECLARE_AES_EVP(256,cbc,CBC); DECLARE_AES_EVP(256,cfb,CFB); DECLARE_AES_EVP(256,ofb,OFB); +DECLARE_AES_EVP(256,ctr,CTR); static int padlock_ciphers (ENGINE *e, const EVP_CIPHER **cipher, const int **nids, int nid) @@ -751,6 +599,9 @@ padlock_ciphers (ENGINE *e, const EVP_CIPHER **cipher, const int **nids, int nid case NID_aes_128_ofb: *cipher = &padlock_aes_128_ofb; break; + case NID_aes_128_ctr: + *cipher = &padlock_aes_128_ctr; + break; case NID_aes_192_ecb: *cipher = &padlock_aes_192_ecb; @@ -764,6 +615,9 @@ padlock_ciphers (ENGINE *e, const EVP_CIPHER **cipher, const int **nids, int nid case NID_aes_192_ofb: *cipher = &padlock_aes_192_ofb; break; + case NID_aes_192_ctr: + *cipher = &padlock_aes_192_ctr; + break; case NID_aes_256_ecb: *cipher = &padlock_aes_256_ecb; @@ -777,6 +631,9 @@ padlock_ciphers (ENGINE *e, const EVP_CIPHER **cipher, const int **nids, int nid case NID_aes_256_ofb: *cipher = &padlock_aes_256_ofb; break; + case NID_aes_256_ctr: + *cipher = &padlock_aes_256_ctr; + break; default: /* Sorry, we don't support this NID */ @@ -794,6 +651,7 @@ padlock_aes_init_key (EVP_CIPHER_CTX *ctx, 
const unsigned char *key, { struct padlock_cipher_data *cdata; int key_len = EVP_CIPHER_CTX_key_length(ctx) * 8; + unsigned long mode = EVP_CIPHER_CTX_mode(ctx); if (key==NULL) return 0; /* ERROR */ @@ -801,7 +659,7 @@ padlock_aes_init_key (EVP_CIPHER_CTX *ctx, const unsigned char *key, memset(cdata, 0, sizeof(struct padlock_cipher_data)); /* Prepare Control word. */ - if (EVP_CIPHER_CTX_mode(ctx) == EVP_CIPH_OFB_MODE) + if (mode == EVP_CIPH_OFB_MODE || mode == EVP_CIPH_CTR_MODE) cdata->cword.b.encdec = 0; else cdata->cword.b.encdec = (ctx->encrypt == 0); @@ -824,15 +682,15 @@ padlock_aes_init_key (EVP_CIPHER_CTX *ctx, const unsigned char *key, and is listed as hardware errata. They most likely will fix it at some point and then a check for stepping would be due here. */ - if (EVP_CIPHER_CTX_mode(ctx) == EVP_CIPH_CFB_MODE || - EVP_CIPHER_CTX_mode(ctx) == EVP_CIPH_OFB_MODE || - enc) - AES_set_encrypt_key(key, key_len, &cdata->ks); - else + if ((mode == EVP_CIPH_ECB_MODE || + mode == EVP_CIPH_CBC_MODE) + && !enc) AES_set_decrypt_key(key, key_len, &cdata->ks); + else + AES_set_encrypt_key(key, key_len, &cdata->ks); #ifndef AES_ASM /* OpenSSL C functions use byte-swapped extended key. */ - padlock_bswapl(&cdata->ks); + padlock_key_bswap(&cdata->ks); #endif cdata->cword.b.keygen = 1; break; @@ -852,321 +710,6 @@ padlock_aes_init_key (EVP_CIPHER_CTX *ctx, const unsigned char *key, return 1; } -/* - * Simplified version of padlock_aes_cipher() used when - * 1) both input and output buffers are at aligned addresses. - * or when - * 2) running on a newer CPU that doesn't require aligned buffers. - */ -static int -padlock_aes_cipher_omnivorous(EVP_CIPHER_CTX *ctx, unsigned char *out_arg, - const unsigned char *in_arg, size_t nbytes) -{ - struct padlock_cipher_data *cdata; - void *iv; - - cdata = ALIGNED_CIPHER_DATA(ctx); - padlock_verify_context(cdata); - - switch (EVP_CIPHER_CTX_mode(ctx)) { - case EVP_CIPH_ECB_MODE: - padlock_xcrypt_ecb(nbytes/AES_BLOCK_SIZE, cdata, out_arg, in_arg); - break; - - case EVP_CIPH_CBC_MODE: - memcpy(cdata->iv, ctx->iv, AES_BLOCK_SIZE); - iv = padlock_xcrypt_cbc(nbytes/AES_BLOCK_SIZE, cdata, out_arg, in_arg); - memcpy(ctx->iv, iv, AES_BLOCK_SIZE); - break; - - case EVP_CIPH_CFB_MODE: - memcpy(cdata->iv, ctx->iv, AES_BLOCK_SIZE); - iv = padlock_xcrypt_cfb(nbytes/AES_BLOCK_SIZE, cdata, out_arg, in_arg); - memcpy(ctx->iv, iv, AES_BLOCK_SIZE); - break; - - case EVP_CIPH_OFB_MODE: - memcpy(cdata->iv, ctx->iv, AES_BLOCK_SIZE); - padlock_xcrypt_ofb(nbytes/AES_BLOCK_SIZE, cdata, out_arg, in_arg); - memcpy(ctx->iv, cdata->iv, AES_BLOCK_SIZE); - break; - - default: - return 0; - } - - memset(cdata->iv, 0, AES_BLOCK_SIZE); - - return 1; -} - -#ifndef PADLOCK_CHUNK -# define PADLOCK_CHUNK 512 /* Must be a power of 2 larger than 16 */ -#endif -#if PADLOCK_CHUNK<16 || PADLOCK_CHUNK&(PADLOCK_CHUNK-1) -# error "insane PADLOCK_CHUNK..." -#endif - -/* Re-align the arguments to 16-Bytes boundaries and run the - encryption function itself. This function is not AES-specific. */ -static int -padlock_aes_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg, - const unsigned char *in_arg, size_t nbytes) -{ - struct padlock_cipher_data *cdata; - const void *inp; - unsigned char *out; - void *iv; - int inp_misaligned, out_misaligned, realign_in_loop; - size_t chunk, allocated=0; - - /* ctx->num is maintained in byte-oriented modes, - such as CFB and OFB... 
*/ - if ((chunk = ctx->num)) { /* borrow chunk variable */ - unsigned char *ivp=ctx->iv; - - switch (EVP_CIPHER_CTX_mode(ctx)) { - case EVP_CIPH_CFB_MODE: - if (chunk >= AES_BLOCK_SIZE) - return 0; /* bogus value */ - - if (ctx->encrypt) - while (chunknum = chunk%AES_BLOCK_SIZE; - break; - case EVP_CIPH_OFB_MODE: - if (chunk >= AES_BLOCK_SIZE) - return 0; /* bogus value */ - - while (chunknum = chunk%AES_BLOCK_SIZE; - break; - } - } - - if (nbytes == 0) - return 1; -#if 0 - if (nbytes % AES_BLOCK_SIZE) - return 0; /* are we expected to do tail processing? */ -#else - /* nbytes is always multiple of AES_BLOCK_SIZE in ECB and CBC - modes and arbitrary value in byte-oriented modes, such as - CFB and OFB... */ -#endif - - /* VIA promises CPUs that won't require alignment in the future. - For now padlock_aes_align_required is initialized to 1 and - the condition is never met... */ - /* C7 core is capable to manage unaligned input in non-ECB[!] - mode, but performance penalties appear to be approximately - same as for software alignment below or ~3x. They promise to - improve it in the future, but for now we can just as well - pretend that it can only handle aligned input... */ - if (!padlock_aes_align_required && (nbytes%AES_BLOCK_SIZE)==0) - return padlock_aes_cipher_omnivorous(ctx, out_arg, in_arg, nbytes); - - inp_misaligned = (((size_t)in_arg) & 0x0F); - out_misaligned = (((size_t)out_arg) & 0x0F); - - /* Note that even if output is aligned and input not, - * I still prefer to loop instead of copy the whole - * input and then encrypt in one stroke. This is done - * in order to improve L1 cache utilization... */ - realign_in_loop = out_misaligned|inp_misaligned; - - if (!realign_in_loop && (nbytes%AES_BLOCK_SIZE)==0) - return padlock_aes_cipher_omnivorous(ctx, out_arg, in_arg, nbytes); - - /* this takes one "if" out of the loops */ - chunk = nbytes; - chunk %= PADLOCK_CHUNK; - if (chunk==0) chunk = PADLOCK_CHUNK; - - if (out_misaligned) { - /* optmize for small input */ - allocated = (chunkiv, ctx->iv, AES_BLOCK_SIZE); - goto cbc_shortcut; - do { - if (iv != cdata->iv) - memcpy(cdata->iv, iv, AES_BLOCK_SIZE); - chunk = PADLOCK_CHUNK; - cbc_shortcut: /* optimize for small input */ - if (inp_misaligned) - inp = padlock_memcpy(out, in_arg, chunk); - else - inp = in_arg; - in_arg += chunk; - - iv = padlock_xcrypt_cbc(chunk/AES_BLOCK_SIZE, cdata, out, inp); - - if (out_misaligned) - out_arg = padlock_memcpy(out_arg, out, chunk) + chunk; - else - out = out_arg+=chunk; - - } while (nbytes -= chunk); - memcpy(ctx->iv, iv, AES_BLOCK_SIZE); - break; - - case EVP_CIPH_CFB_MODE: - memcpy (iv = cdata->iv, ctx->iv, AES_BLOCK_SIZE); - chunk &= ~(AES_BLOCK_SIZE-1); - if (chunk) goto cfb_shortcut; - else goto cfb_skiploop; - do { - if (iv != cdata->iv) - memcpy(cdata->iv, iv, AES_BLOCK_SIZE); - chunk = PADLOCK_CHUNK; - cfb_shortcut: /* optimize for small input */ - if (inp_misaligned) - inp = padlock_memcpy(out, in_arg, chunk); - else - inp = in_arg; - in_arg += chunk; - - iv = padlock_xcrypt_cfb(chunk/AES_BLOCK_SIZE, cdata, out, inp); - - if (out_misaligned) - out_arg = padlock_memcpy(out_arg, out, chunk) + chunk; - else - out = out_arg+=chunk; - - nbytes -= chunk; - } while (nbytes >= AES_BLOCK_SIZE); - - cfb_skiploop: - if (nbytes) { - unsigned char *ivp = cdata->iv; - - if (iv != ivp) { - memcpy(ivp, iv, AES_BLOCK_SIZE); - iv = ivp; - } - ctx->num = nbytes; - if (cdata->cword.b.encdec) { - cdata->cword.b.encdec=0; - padlock_reload_key(); - padlock_xcrypt_ecb(1,cdata,ivp,ivp); - cdata->cword.b.encdec=1; - 
padlock_reload_key(); - while(nbytes) { - unsigned char c = *(in_arg++); - *(out_arg++) = c ^ *ivp; - *(ivp++) = c, nbytes--; - } - } - else { padlock_reload_key(); - padlock_xcrypt_ecb(1,cdata,ivp,ivp); - padlock_reload_key(); - while (nbytes) { - *ivp = *(out_arg++) = *(in_arg++) ^ *ivp; - ivp++, nbytes--; - } - } - } - - memcpy(ctx->iv, iv, AES_BLOCK_SIZE); - break; - - case EVP_CIPH_OFB_MODE: - memcpy(cdata->iv, ctx->iv, AES_BLOCK_SIZE); - chunk &= ~(AES_BLOCK_SIZE-1); - if (chunk) do { - if (inp_misaligned) - inp = padlock_memcpy(out, in_arg, chunk); - else - inp = in_arg; - in_arg += chunk; - - padlock_xcrypt_ofb(chunk/AES_BLOCK_SIZE, cdata, out, inp); - - if (out_misaligned) - out_arg = padlock_memcpy(out_arg, out, chunk) + chunk; - else - out = out_arg+=chunk; - - nbytes -= chunk; - chunk = PADLOCK_CHUNK; - } while (nbytes >= AES_BLOCK_SIZE); - - if (nbytes) { - unsigned char *ivp = cdata->iv; - - ctx->num = nbytes; - padlock_reload_key(); /* empirically found */ - padlock_xcrypt_ecb(1,cdata,ivp,ivp); - padlock_reload_key(); /* empirically found */ - while (nbytes) { - *(out_arg++) = *(in_arg++) ^ *ivp; - ivp++, nbytes--; - } - } - - memcpy(ctx->iv, cdata->iv, AES_BLOCK_SIZE); - break; - - default: - return 0; - } - - /* Clean the realign buffer if it was used */ - if (out_misaligned) { - volatile unsigned long *p=(void *)out; - size_t n = allocated/sizeof(*p); - while (n--) *p++=0; - } - - memset(cdata->iv, 0, AES_BLOCK_SIZE); - - return 1; -} - #endif /* OPENSSL_NO_AES */ /* ===== Random Number Generator ===== */
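
For reference, a minimal, self-contained sketch (not part of the patch) of how a caller is expected to interpret the capability word returned by the new padlock_capability() assembler routine. The bit tests mirror the rewritten padlock_available() above: bits 6/7 signal ACE (xcrypt) present/enabled, bits 2/3 the hardware RNG (xstore). The test program itself is illustrative only.

/* sketch only: assumes linking against the generated e_padlock-x86[_64] object */
#include <stdio.h>

extern unsigned int padlock_capability(void);

int main(void)
{
	unsigned int edx = padlock_capability();

	/* ACE is usable only when both the "present" and "enabled" bits are set */
	int use_ace = ((edx & (0x3 << 6)) == (0x3 << 6));
	/* the RNG uses the analogous present/enabled pair at bits 2/3 */
	int use_rng = ((edx & (0x3 << 2)) == (0x3 << 2));

	printf("PadLock ACE=%d RNG=%d\n", use_ace, use_rng);
	return 0;
}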
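Likewise, a small illustrative example (again not part of the patch) of the alignment arithmetic behind NEAREST_ALIGNED()/ALIGNED_CIPHER_DATA(): the EVP_CIPHER declarations reserve sizeof(struct padlock_cipher_data) + 16 bytes of cipher_data so the structure can be moved up to the next 16-byte boundary, as the xcrypt instructions require. The buffer and assertions below are hypothetical stand-ins for ctx->cipher_data.

#include <stddef.h>
#include <assert.h>

#define NEAREST_ALIGNED(ptr) ( (unsigned char *)(ptr) + \
	( (0x10 - ((size_t)(ptr) & 0x0F)) & 0x0F ) )

int main(void)
{
	unsigned char buf[64 + 16];			/* stand-in for ctx->cipher_data */
	unsigned char *aligned = NEAREST_ALIGNED(buf + 1);	/* deliberately misaligned input */

	assert(((size_t)aligned & 0x0F) == 0);	/* result is 16-byte aligned */
	assert(aligned - (buf + 1) < 16);	/* at most 15 bytes are skipped, within the spare 16 */
	return 0;
}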