diff options
Diffstat (limited to 'crypto/aes/asm')
-rwxr-xr-x | crypto/aes/asm/aes-x86_64.pl          | 2
-rw-r--r-- | crypto/aes/asm/aesni-sha1-x86_64.pl   | 2
-rw-r--r-- | crypto/aes/asm/aesni-sha256-x86_64.pl | 2
-rw-r--r-- | crypto/aes/asm/aesni-x86_64.pl        | 2
-rwxr-xr-x | crypto/aes/asm/aesv8-armx.pl          | 2
-rw-r--r-- | crypto/aes/asm/bsaes-armv8.pl         | 5
-rw-r--r-- | crypto/aes/asm/bsaes-x86_64.pl        | 5
-rw-r--r-- | crypto/aes/asm/vpaes-x86_64.pl        | 3
8 files changed, 19 insertions, 4 deletions
diff --git a/crypto/aes/asm/aes-x86_64.pl b/crypto/aes/asm/aes-x86_64.pl index 25f7ded947..a878c86662 100755 --- a/crypto/aes/asm/aes-x86_64.pl +++ b/crypto/aes/asm/aes-x86_64.pl @@ -2221,6 +2221,7 @@ ___ } $code.=<<___; +.section .rodata align=64 .align 64 .LAES_Te: ___ @@ -2643,6 +2644,7 @@ $code.=<<___; .long 0x1b1b1b1b, 0x1b1b1b1b, 0, 0 .asciz "AES for x86_64, CRYPTOGAMS by <appro\@openssl.org>" .align 64 +.previous ___ # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame, diff --git a/crypto/aes/asm/aesni-sha1-x86_64.pl b/crypto/aes/asm/aesni-sha1-x86_64.pl index dbe33a3f1a..9b58f31381 100644 --- a/crypto/aes/asm/aesni-sha1-x86_64.pl +++ b/crypto/aes/asm/aesni-sha1-x86_64.pl @@ -1738,6 +1738,7 @@ ___ }}} } $code.=<<___; +.section .rodata align=64 .align 64 K_XX_XX: .long 0x5a827999,0x5a827999,0x5a827999,0x5a827999 # K_00_19 @@ -1749,6 +1750,7 @@ K_XX_XX: .asciz "AESNI-CBC+SHA1 stitch for x86_64, CRYPTOGAMS by <appro\@openssl.org>" .align 64 +.previous ___ if ($shaext) {{{ ($in0,$out,$len,$key,$ivp,$ctx,$inp)=("%rdi","%rsi","%rdx","%rcx","%r8","%r9","%r10"); diff --git a/crypto/aes/asm/aesni-sha256-x86_64.pl b/crypto/aes/asm/aesni-sha256-x86_64.pl index 5521766a6a..5c8bb0fbcc 100644 --- a/crypto/aes/asm/aesni-sha256-x86_64.pl +++ b/crypto/aes/asm/aesni-sha256-x86_64.pl @@ -168,6 +168,7 @@ $code.=<<___; .cfi_endproc .size $func,.-$func +.section .rodata align=64 .align 64 .type $TABLE,\@object $TABLE: @@ -210,6 +211,7 @@ $TABLE: .long 0,0,0,0, 0,0,0,0 .asciz "AESNI-CBC+SHA256 stitch for x86_64, CRYPTOGAMS by <appro\@openssl.org>" .align 64 +.previous ___ ###################################################################### diff --git a/crypto/aes/asm/aesni-x86_64.pl b/crypto/aes/asm/aesni-x86_64.pl index 09c8f78890..fde5633f4b 100644 --- a/crypto/aes/asm/aesni-x86_64.pl +++ b/crypto/aes/asm/aesni-x86_64.pl @@ -4743,6 +4743,7 @@ ___ } $code.=<<___; +.section .rodata align=64 .align 64 .Lbswap_mask: .byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 @@ 
-4765,6 +4766,7 @@ $code.=<<___; .asciz "AES for Intel AES-NI, CRYPTOGAMS by <appro\@openssl.org>" .align 64 +.previous ___ # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame, diff --git a/crypto/aes/asm/aesv8-armx.pl b/crypto/aes/asm/aesv8-armx.pl index 642d779b99..33a2dd53da 100755 --- a/crypto/aes/asm/aesv8-armx.pl +++ b/crypto/aes/asm/aesv8-armx.pl @@ -1,5 +1,5 @@ #! /usr/bin/env perl -# Copyright 2014-2023 The OpenSSL Project Authors. All Rights Reserved. +# Copyright 2014-2024 The OpenSSL Project Authors. All Rights Reserved. # # Licensed under the Apache License 2.0 (the "License"). You may not use # this file except in compliance with the License. You can obtain a copy diff --git a/crypto/aes/asm/bsaes-armv8.pl b/crypto/aes/asm/bsaes-armv8.pl index b3c97e439f..c3c5ff3e05 100644 --- a/crypto/aes/asm/bsaes-armv8.pl +++ b/crypto/aes/asm/bsaes-armv8.pl @@ -1018,6 +1018,7 @@ _bsaes_key_convert: // Initialisation vector overwritten with last quadword of ciphertext // No output registers, usual AAPCS64 register preservation ossl_bsaes_cbc_encrypt: + AARCH64_VALID_CALL_TARGET cmp x2, #128 bhs .Lcbc_do_bsaes b AES_cbc_encrypt @@ -1270,7 +1271,7 @@ ossl_bsaes_cbc_encrypt: // Output text filled in // No output registers, usual AAPCS64 register preservation ossl_bsaes_ctr32_encrypt_blocks: - + AARCH64_VALID_CALL_TARGET cmp x2, #8 // use plain AES for blo .Lctr_enc_short // small sizes @@ -1476,6 +1477,7 @@ ossl_bsaes_ctr32_encrypt_blocks: // Output ciphertext filled in // No output registers, usual AAPCS64 register preservation ossl_bsaes_xts_encrypt: + AARCH64_VALID_CALL_TARGET // Stack layout: // sp -> // nrounds*128-96 bytes: key schedule @@ -1921,6 +1923,7 @@ ossl_bsaes_xts_encrypt: // Output plaintext filled in // No output registers, usual AAPCS64 register preservation ossl_bsaes_xts_decrypt: + AARCH64_VALID_CALL_TARGET // Stack layout: // sp -> // nrounds*128-96 bytes: key schedule diff --git a/crypto/aes/asm/bsaes-x86_64.pl 
b/crypto/aes/asm/bsaes-x86_64.pl index 6498cfe908..002a51a603 100644 --- a/crypto/aes/asm/bsaes-x86_64.pl +++ b/crypto/aes/asm/bsaes-x86_64.pl @@ -2182,6 +2182,7 @@ $code.=<<___; .align 16 ossl_bsaes_xts_encrypt: .cfi_startproc + endbranch mov %rsp, %rax .Lxts_enc_prologue: push %rbp @@ -2581,6 +2582,7 @@ $code.=<<___; .align 16 ossl_bsaes_xts_decrypt: .cfi_startproc + endbranch mov %rsp, %rax .Lxts_dec_prologue: push %rbp @@ -3003,6 +3005,7 @@ ___ } $code.=<<___; .type _bsaes_const,\@object +.section .rodata align=64 .align 64 _bsaes_const: .LM0ISR: # InvShiftRows constants @@ -3054,9 +3057,9 @@ _bsaes_const: .quad 0x02060a0e03070b0f, 0x0004080c0105090d .L63: .quad 0x6363636363636363, 0x6363636363636363 -.asciz "Bit-sliced AES for x86_64/SSSE3, Emilia Käsper, Peter Schwabe, Andy Polyakov" .align 64 .size _bsaes_const,.-_bsaes_const +.asciz "Bit-sliced AES for x86_64/SSSE3, Emilia Käsper, Peter Schwabe, Andy Polyakov" ___ # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame, diff --git a/crypto/aes/asm/vpaes-x86_64.pl b/crypto/aes/asm/vpaes-x86_64.pl index 845528f41a..eb8937684c 100644 --- a/crypto/aes/asm/vpaes-x86_64.pl +++ b/crypto/aes/asm/vpaes-x86_64.pl @@ -1006,6 +1006,7 @@ _vpaes_preheat: ## ## ######################################################## .type _vpaes_consts,\@object +.section .rodata align=64 .align 64 _vpaes_consts: .Lk_inv: # inv, inva @@ -1101,9 +1102,9 @@ _vpaes_consts: .Lk_dsbo: # decryption sbox final output .quad 0x1387EA537EF94000, 0xC7AA6DB9D4943E2D .quad 0x12D7560F93441D00, 0xCA4B8159D8C58E9C -.asciz "Vector Permutation AES for x86_64/SSSE3, Mike Hamburg (Stanford University)" .align 64 .size _vpaes_consts,.-_vpaes_consts +.asciz "Vector Permutation AES for x86_64/SSSE3, Mike Hamburg (Stanford University)" ___ if ($win64) { |