path: root/crypto/modes/asm/aes-gcm-armv8_64.pl
author     Fangming.Fang <fangming.fang@arm.com>    2019-05-31 10:15:10 +0000
committer  Pauli <paul.dale@oracle.com>             2019-12-19 12:36:07 +1000
commit     31b59078c8245a4ee7f7fa4e6ea98bba7f9a29a5 (patch)
tree       e490a3b2bcf796c9c6e98fe86bcfb3d351a8a1b5 /crypto/modes/asm/aes-gcm-armv8_64.pl
parent     51a7c4b5f2a0b2d0f6bc0c87ec2ee44b9697dc78 (diff)
Optimize AES-GCM implementation on aarch64
Compared to the current implementation, this change further improves performance by tuning the loop-unrolling factor in the interleaved implementation and by enabling a higher level of parallelism.

Performance (A72):

new
type          16 bytes     64 bytes     256 bytes    1024 bytes    8192 bytes    16384 bytes
aes-128-gcm   113065.51k   375743.00k   848359.51k   1517865.98k   1964040.19k   1986663.77k
aes-192-gcm   110679.32k   364470.63k   799322.88k   1428084.05k   1826917.03k   1848967.17k
aes-256-gcm   104919.86k   352939.29k   759477.76k   1330683.56k   1663175.34k   1670430.72k

old
type          16 bytes     64 bytes     256 bytes    1024 bytes    8192 bytes    16384 bytes
aes-128-gcm   115595.32k   382348.65k   855891.29k   1236452.35k   1425670.14k   1429793.45k
aes-192-gcm   112227.02k   369543.47k   810046.55k   1147948.37k   1286288.73k   1296941.06k
aes-256-gcm   111543.90k   361902.36k   769543.59k   1070693.03k   1208576.68k   1207511.72k

Change-Id: I28a2dca85c001a63a2a942e80c7c64f7a4fdfcf7

Reviewed-by: Bernd Edlinger <bernd.edlinger@hotmail.de>
Reviewed-by: Paul Dale <paul.dale@oracle.com>
(Merged from https://github.com/openssl/openssl/pull/9818)
Diffstat (limited to 'crypto/modes/asm/aes-gcm-armv8_64.pl')
-rwxr-xr-x  crypto/modes/asm/aes-gcm-armv8_64.pl  5722
1 file changed, 5722 insertions, 0 deletions
diff --git a/crypto/modes/asm/aes-gcm-armv8_64.pl b/crypto/modes/asm/aes-gcm-armv8_64.pl
new file mode 100755
index 0000000000..ee88906d85
--- /dev/null
+++ b/crypto/modes/asm/aes-gcm-armv8_64.pl
@@ -0,0 +1,5722 @@
+#! /usr/bin/env perl
+# Copyright 2019 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the Apache License 2.0 (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+#
+#========================================================================
+# Written by Fangming Fang <fangming.fang@arm.com> for the OpenSSL project,
+# derived from https://github.com/ARM-software/AArch64cryptolib, original
+# author Samuel Lee <Samuel.Lee@arm.com>. The module is, however, dual
+# licensed under OpenSSL and CRYPTOGAMS licenses depending on where you
+# obtain it. For further details see http://www.openssl.org/~appro/cryptogams/.
+#========================================================================
+#
+# Approach: assume we don't want to reload constants, so reserve ~half of the vector register file for constants
+#
+# The main loop acts on four 16B blocks per iteration, then does a modulo reduction of the accumulated intermediate hashes from those four blocks
+#
+# ____________________________________________________
+# | |
+# | PRE |
+# |____________________________________________________|
+# | | | |
+# | CTR block 4k+8 | AES block 4k+4 | GHASH block 4k+0 |
+# |________________|________________|__________________|
+# | | | |
+# | CTR block 4k+9 | AES block 4k+5 | GHASH block 4k+1 |
+# |________________|________________|__________________|
+# | | | |
+# | CTR block 4k+10| AES block 4k+6 | GHASH block 4k+2 |
+# |________________|________________|__________________|
+# | | | |
+# | CTR block 4k+11| AES block 4k+7 | GHASH block 4k+3 |
+# |________________|____(mostly)____|__________________|
+# | |
+# | MODULO |
+# |____________________________________________________|
+#
+# PRE:
+# Ensure the previously generated intermediate hash is aligned and merged with the result for GHASH block 4k+0
+# EXT low_acc, low_acc, low_acc, #8
+# EOR res_curr (4k+0), res_curr (4k+0), low_acc
+#
+# CTR block:
+# Increment and byte-reverse the counter in scalar registers, then transfer it to SIMD registers
+# REV ctr32, rev_ctr32
+# ORR ctr64, constctr96_top32, ctr32, LSL #32
+# INS ctr_next.d[0], constctr96_bottom64 // Keeping this in scalar registers to free up space in SIMD RF
+# INS ctr_next.d[1], ctr64
+# ADD rev_ctr32, #1
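+#
+# As a rough reference model of the three instructions above (illustrative only, not
+# part of the module; plain Perl on a 64-bit little-endian build is assumed, and
+# ctr_block is a hypothetical helper name): the low 64 bits of the counter block stay
+# fixed, and its upper 64 bits hold the remaining 32 IV bits with the byte-reversed
+# 32-bit block counter on top.
+#
+#   sub ctr_block {
+#       my ($constctr96_bottom64, $constctr96_top32, $rev_ctr32) = @_;
+#       my $ctr32 = unpack("N", pack("V", $rev_ctr32));                # REV: byte-reverse the counter
+#       my $ctr64 = ($constctr96_top32 & 0xffffffff) | ($ctr32 << 32); # ORR ..., LSL #32
+#       return pack("Q< Q<", $constctr96_bottom64, $ctr64);           # INS d[0] / INS d[1]: 16-byte block
+#   }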
+#
+# AES block:
+# Do AES encryption/decryption on CTR block X and EOR it with input block X. Take a 256-bit key below as an example.
+# We do a small trick here: load the input in scalar registers, EOR it with the last round key there, and only then transfer it to a SIMD register.
+# Given we are very constrained in our ASIMD registers this is quite important.
+#
+# Encrypt:
+# LDR input_low, [ input_ptr ], #8
+# LDR input_high, [ input_ptr ], #8
+# EOR input_low, k14_low
+# EOR input_high, k14_high
+# INS res_curr.d[0], input_low
+# INS res_curr.d[1], input_high
+# AESE ctr_curr, k0; AESMC ctr_curr, ctr_curr
+# AESE ctr_curr, k1; AESMC ctr_curr, ctr_curr
+# AESE ctr_curr, k2; AESMC ctr_curr, ctr_curr
+# AESE ctr_curr, k3; AESMC ctr_curr, ctr_curr
+# AESE ctr_curr, k4; AESMC ctr_curr, ctr_curr
+# AESE ctr_curr, k5; AESMC ctr_curr, ctr_curr
+# AESE ctr_curr, k6; AESMC ctr_curr, ctr_curr
+# AESE ctr_curr, k7; AESMC ctr_curr, ctr_curr
+# AESE ctr_curr, k8; AESMC ctr_curr, ctr_curr
+# AESE ctr_curr, k9; AESMC ctr_curr, ctr_curr
+# AESE ctr_curr, k10; AESMC ctr_curr, ctr_curr
+# AESE ctr_curr, k11; AESMC ctr_curr, ctr_curr
+# AESE ctr_curr, k12; AESMC ctr_curr, ctr_curr
+# AESE ctr_curr, k13
+# EOR res_curr, res_curr, ctr_curr
+# ST1 { res_curr.16b }, [ output_ptr ], #16
+#
+# Decrypt:
+# AESE ctr_curr, k0; AESMC ctr_curr, ctr_curr
+# AESE ctr_curr, k1; AESMC ctr_curr, ctr_curr
+# AESE ctr_curr, k2; AESMC ctr_curr, ctr_curr
+# AESE ctr_curr, k3; AESMC ctr_curr, ctr_curr
+# AESE ctr_curr, k4; AESMC ctr_curr, ctr_curr
+# AESE ctr_curr, k5; AESMC ctr_curr, ctr_curr
+# AESE ctr_curr, k6; AESMC ctr_curr, ctr_curr
+# AESE ctr_curr, k7; AESMC ctr_curr, ctr_curr
+# AESE ctr_curr, k8; AESMC ctr_curr, ctr_curr
+# AESE ctr_curr, k9; AESMC ctr_curr, ctr_curr
+# AESE ctr_curr, k10; AESMC ctr_curr, ctr_curr
+# AESE ctr_curr, k11; AESMC ctr_curr, ctr_curr
+# AESE ctr_curr, k12; AESMC ctr_curr, ctr_curr
+# AESE ctr_curr, k13
+# LDR res_curr, [ input_ptr ], #16
+# EOR res_curr, res_curr, ctr_curr
+# MOV output_low, res_curr.d[0]
+# MOV output_high, res_curr.d[1]
+# EOR output_low, k14_low
+# EOR output_high, k14_high
+# STP output_low, output_high, [ output_ptr ], #16
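+#
+# The scalar-register trick above only relies on XOR being associative: folding the last
+# round key into the data while it is still in scalar registers gives the same result as
+# applying that key after the SIMD EOR. A toy sanity check (hypothetical values; aes_no_last
+# stands for the AES output before the final AddRoundKey):
+#
+#   my ($block, $k_last, $aes_no_last) = (0x0123456789abcdef, 0x0f0f0f0f0f0f0f0f, 0xfedcba9876543210);
+#   die "mismatch" unless (($block ^ $k_last) ^ $aes_no_last) == ($block ^ ($aes_no_last ^ $k_last));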
+#
+# GHASH block X:
+# Do a 128b Karatsuba polynomial multiplication on the block
+# We only have 64b->128b polynomial multipliers, so naively we would need 4 64b multiplies to generate a 128b result
+#
+# multiplication:
+# Pmull(A,B) == (Pmull(Ah,Bh)<<128 | Pmull(Al,Bl)) ^ (Pmull(Ah,Bl) ^ Pmull(Al,Bh))<<64
+#
+# The idea behind Karatsuba multiplication is that we can do just 3 64b multiplies:
+# Pmull(A,B) == (Pmull(Ah,Bh)<<128 | Pmull(Al,Bl)) ^ (Pmull(Ah^Al,Bh^Bl) ^ Pmull(Ah,Bh) ^ Pmull(Al,Bl))<<64
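+#
+# A plain Perl sketch of these two identities (illustrative only, not part of the module;
+# Math::BigInt holds the 128b results and clmul64 is a hypothetical stand-in for one PMULL):
+#
+#   use Math::BigInt;
+#
+#   sub clmul64 {                                    # carry-less 64b x 64b -> 128b
+#       my ($a, $b) = @_;
+#       my $r = Math::BigInt->bzero();
+#       for my $i (0 .. 63) {
+#           $r->bxor(Math::BigInt->new($b)->blsft($i)) if ($a >> $i) & 1;
+#       }
+#       return $r;
+#   }
+#
+#   sub clmul128_schoolbook {                        # 4 small multiplies
+#       my ($ah, $al, $bh, $bl) = @_;
+#       my $mid = clmul64($ah, $bl)->bxor(clmul64($al, $bh));
+#       return clmul64($ah, $bh)->blsft(128)->bxor(clmul64($al, $bl))->bxor($mid->blsft(64));
+#   }
+#
+#   sub clmul128_karatsuba {                         # 3 small multiplies, same result
+#       my ($ah, $al, $bh, $bl) = @_;
+#       my ($hh, $ll) = (clmul64($ah, $bh), clmul64($al, $bl));
+#       my $mid = clmul64($ah ^ $al, $bh ^ $bl)->bxor($hh)->bxor($ll);
+#       return $hh->copy()->blsft(128)->bxor($ll)->bxor($mid->blsft(64));
+#   }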
+#
+# There is some complication here because the bit order of GHASH's PMULL is reversed compared to elsewhere, so we are
+# multiplying with "twisted" powers of H
+#
+# Note: We can PMULL directly into acc_x in the first GHASH of the loop
+# Note: For scheduling on big cores we want to split the processing over two loop iterations - otherwise the critical
+# path latency dominates the performance
+#
+# This has a knock-on effect on register pressure, so we have to be a bit more clever with our temporary registers
+# than indicated here
+# REV64 res_curr, res_curr
+# INS t_m.d[0], res_curr.d[1]
+# EOR t_m.8B, t_m.8B, res_curr.8B
+# PMULL2 t_h, res_curr, HX
+# PMULL t_l, res_curr, HX
+# PMULL t_m, t_m, HX_k
+# EOR acc_h, acc_h, t_h
+# EOR acc_l, acc_l, t_l
+# EOR acc_m, acc_m, t_m
+#
+# MODULO: take the partial accumulators from GHASH (together representing the sum of 256b multiplication results) and do a modulo reduction on them
+# There is some complication here because the bit order of GHASH's PMULL is reversed compared to elsewhere, so we do the modulo
+# with a reversed constant
+# EOR acc_m, acc_m, acc_h
+# EOR acc_m, acc_m, acc_l // Finish off karatsuba processing
+# PMULL t_mod, acc_h, mod_constant
+# EXT acc_h, acc_h, acc_h, #8
+# EOR acc_m, acc_m, acc_h
+# EOR acc_m, acc_m, t_mod
+# PMULL acc_h, acc_m, mod_constant
+# EXT acc_m, acc_m, acc_m, #8
+# EOR acc_l, acc_l, acc_h
+# EOR acc_l, acc_l, acc_m
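+#
+# For reference, a plain-bit-order sketch of the reduction the two folding PMULLs achieve
+# (illustrative only, not part of the module; Math::BigInt assumed, ghash_reduce is a
+# hypothetical helper): reduce a <=255b carry-less product modulo the GHASH polynomial
+# g(x) = x^128 + x^7 + x^2 + x + 1. Because the assembly works on data with reversed bit
+# order, it folds with the 0xc2 constant rather than using 0x87 directly; this sketch only
+# checks the arithmetic in the natural bit order.
+#
+#   use Math::BigInt;
+#
+#   sub ghash_reduce {
+#       my ($prod) = @_;                                               # Math::BigInt, up to 255 bits
+#       my $g = Math::BigInt->new(1)->blsft(128)->bxor(Math::BigInt->new(0x87));
+#       for (my $i = 254; $i >= 128; $i--) {                           # clear high bits one at a time
+#           $prod->bxor($g->copy()->blsft($i - 128)) if $prod->copy()->brsft($i)->is_odd();
+#       }
+#       return $prod;                                                  # now fits in 128 bits
+#   }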
+
+$output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef;
+$flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef;
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate ) or
+die "can't locate arm-xlate.pl";
+
+open OUT,"| \"$^X\" $xlate $flavour $output";
+*STDOUT=*OUT;
+
+$input_ptr="x0"; #argument block
+$bit_length="x1";
+$output_ptr="x2";
+$current_tag="x3";
+$counter="x16";
+$cc="x8";
+
+{
+my ($end_input_ptr,$main_end_input_ptr,$input_l0,$input_h0)=map("x$_",(4..7));
+my ($input_l1,$input_h1,$input_l2,$input_h2,$input_l3,$input_h3)=map("x$_",(19..24));
+my ($output_l1,$output_h1,$output_l2,$output_h2,$output_l3,$output_h3)=map("x$_",(19..24));
+my ($output_l0,$output_h0)=map("x$_",(6..7));
+
+my $ctr32w="w9";
+my ($ctr32x,$ctr96_b64x,$ctr96_t32x,$rctr32x,$rk10_l,$rk10_h,$len)=map("x$_",(9..15));
+my ($ctr96_t32w,$rctr32w)=map("w$_",(11..12));
+
+my ($ctr0b,$ctr1b,$ctr2b,$ctr3b,$res0b,$res1b,$res2b,$res3b)=map("v$_.16b",(0..7));
+my ($ctr0,$ctr1,$ctr2,$ctr3,$res0,$res1,$res2,$res3)=map("v$_",(0..7));
+my ($ctr0d,$ctr1d,$ctr2d,$ctr3d,$res0d,$res1d,$res2d,$res3d)=map("d$_",(0..7));
+my ($res0q,$res1q,$res2q,$res3q)=map("q$_",(4..7));
+
+my ($acc_hb,$acc_mb,$acc_lb)=map("v$_.16b",(9..11));
+my ($acc_h,$acc_m,$acc_l)=map("v$_",(9..11));
+my ($acc_hd,$acc_md,$acc_ld)=map("d$_",(9..11));
+
+my ($h1,$h2,$h3,$h4,$h12k,$h34k)=map("v$_",(12..17));
+my ($h1q,$h2q,$h3q,$h4q)=map("q$_",(12..15));
+my ($h1b,$h2b,$h3b,$h4b)=map("v$_.16b",(12..15));
+
+my $t0="v8";
+my $t0d="d8";
+
+my ($t1,$t2,$t3)=map("v$_",(28..30));
+my ($t1d,$t2d,$t3d)=map("d$_",(28..30));
+
+my $t4="v8";
+my $t4d="d8";
+my $t5="v28";
+my $t5d="d28";
+my $t6="v31";
+my $t6d="d31";
+
+my $t7="v4";
+my $t7d="d4";
+my $t8="v29";
+my $t8d="d29";
+my $t9="v30";
+my $t9d="d30";
+
+my ($ctr_t0,$ctr_t1,$ctr_t2,$ctr_t3)=map("v$_",(4..7));
+my ($ctr_t0d,$ctr_t1d,$ctr_t2d,$ctr_t3d)=map("d$_",(4..7));
+my ($ctr_t0b,$ctr_t1b,$ctr_t2b,$ctr_t3b)=map("v$_.16b",(4..7));
+
+my $mod_constantd="d8";
+my $mod_constant="v8";
+my $mod_t="v31";
+
+my ($rk0,$rk1,$rk2,$rk3,$rk4,$rk5,$rk6,$rk7,$rk8,$rk9)=map("v$_.16b",(18..27));
+my ($rk0q,$rk1q,$rk2q,$rk3q,$rk4q,$rk5q,$rk6q,$rk7q,$rk8q,$rk9q)=map("q$_",(18..27));
+my $rk2q1="v20.1q";
+my $rk3q1="v21.1q";
+my $rk4v="v22";
+my $rk4d="d22";
+
+$code=<<___;
+#include "arm_arch.h"
+
+#if __ARM_MAX_ARCH__>=8
+___
+$code.=".arch armv8-a+crypto\n.text\n" if ($flavour =~ /64/);
+$code.=<<___ if ($flavour !~ /64/);
+.fpu neon
+#ifdef __thumb2__
+.syntax unified
+.thumb
+# define INST(a,b,c,d) $_byte c,0xef,a,b
+#else
+.code 32
+# define INST(a,b,c,d) $_byte a,b,c,0xf2
+#endif
+
+.text
+___
+
+#########################################################################################
+# size_t aes_gcm_enc_128_kernel(const unsigned char *in,
+# size_t len,
+# unsigned char *out,
+# const void *key,
+# unsigned char ivec[16],
+# u64 *Xi);
+#
+$code.=<<___;
+.global aes_gcm_enc_128_kernel
+.type aes_gcm_enc_128_kernel,%function
+.align 4
+aes_gcm_enc_128_kernel:
+ cbz x1, .L128_enc_ret
+ stp x19, x20, [sp, #-112]!
+ mov x16, x4
+ mov x8, x5
+ stp x21, x22, [sp, #16]
+ stp x23, x24, [sp, #32]
+ stp d8, d9, [sp, #48]
+ stp d10, d11, [sp, #64]
+ stp d12, d13, [sp, #80]
+ stp d14, d15, [sp, #96]
+
+ ldp $ctr96_b64x, $ctr96_t32x, [$counter] @ ctr96_b64, ctr96_t32
+ ldp $rk10_l, $rk10_h, [$cc, #160] @ load rk10
+
+ ld1 {$acc_lb}, [$current_tag]
+ ext $acc_lb, $acc_lb, $acc_lb, #8
+ rev64 $acc_lb, $acc_lb
+ lsr $main_end_input_ptr, $bit_length, #3 @ byte_len
+ mov $len, $main_end_input_ptr
+
+ ldr $rk9q, [$cc, #144] @ load rk9
+ add $end_input_ptr, $input_ptr, $bit_length, lsr #3 @ end_input_ptr
+ sub $main_end_input_ptr, $main_end_input_ptr, #1 @ byte_len - 1
+
+ lsr $rctr32x, $ctr96_t32x, #32
+ ldr $h4q, [$current_tag, #112] @ load h4l | h4h
+ ext $h4b, $h4b, $h4b, #8
+
+ fmov $ctr1d, $ctr96_b64x @ CTR block 1
+ rev $rctr32w, $rctr32w @ rev_ctr32
+
+ add $rctr32w, $rctr32w, #1 @ increment rev_ctr32
+ orr $ctr96_t32w, $ctr96_t32w, $ctr96_t32w
+ ldr $rk0q, [$cc, #0] @ load rk0
+
+ rev $ctr32w, $rctr32w @ CTR block 1
+ add $rctr32w, $rctr32w, #1 @ CTR block 1
+ fmov $ctr3d, $ctr96_b64x @ CTR block 3
+
+ orr $ctr32x, $ctr96_t32x, $ctr32x, lsl #32 @ CTR block 1
+ ld1 { $ctr0b}, [$counter] @ special case vector load initial counter so we can start first AES block as quickly as possible
+
+ fmov $ctr1.d[1], $ctr32x @ CTR block 1
+ rev $ctr32w, $rctr32w @ CTR block 2
+
+ fmov $ctr2d, $ctr96_b64x @ CTR block 2
+ orr $ctr32x, $ctr96_t32x, $ctr32x, lsl #32 @ CTR block 2
+ add $rctr32w, $rctr32w, #1 @ CTR block 2
+
+ fmov $ctr2.d[1], $ctr32x @ CTR block 2
+ rev $ctr32w, $rctr32w @ CTR block 3
+
+ orr $ctr32x, $ctr96_t32x, $ctr32x, lsl #32 @ CTR block 3
+ ldr $rk1q, [$cc, #16] @ load rk1
+
+ add $rctr32w, $rctr32w, #1 @ CTR block 3
+ fmov $ctr3.d[1], $ctr32x @ CTR block 3
+
+ ldr $h3q, [$current_tag, #80] @ load h3l | h3h
+ ext $h3b, $h3b, $h3b, #8
+
+ aese $ctr1b, $rk0 \n aesmc $ctr1b, $ctr1b @ AES block 1 - round 0
+ ldr $rk2q, [$cc, #32] @ load rk2
+
+ aese $ctr2b, $rk0 \n aesmc $ctr2b, $ctr2b @ AES block 2 - round 0
+ ldr $h1q, [$current_tag, #32] @ load h1l | h1h
+ ext $h1b, $h1b, $h1b, #8
+
+ aese $ctr0b, $rk0 \n aesmc $ctr0b, $ctr0b @ AES block 0 - round 0
+ ldr $rk8q, [$cc, #128] @ load rk8
+
+ aese $ctr3b, $rk0 \n aesmc $ctr3b, $ctr3b @ AES block 3 - round 0
+ ldr $rk3q, [$cc, #48] @ load rk3
+
+ aese $ctr2b, $rk1 \n aesmc $ctr2b, $ctr2b @ AES block 2 - round 1
+ trn2 $h34k.2d, $h3.2d, $h4.2d @ h4l | h3l
+
+ aese $ctr0b, $rk1 \n aesmc $ctr0b, $ctr0b @ AES block 0 - round 1
+ ldr $rk6q, [$cc, #96] @ load rk6
+
+ aese $ctr1b, $rk1 \n aesmc $ctr1b, $ctr1b @ AES block 1 - round 1
+ ldr $rk7q, [$cc, #112] @ load rk7
+
+ aese $ctr3b, $rk1 \n aesmc $ctr3b, $ctr3b @ AES block 3 - round 1
+ trn1 $acc_h.2d, $h3.2d, $h4.2d @ h4h | h3h
+
+ aese $ctr0b, $rk2 \n aesmc $ctr0b, $ctr0b @ AES block 0 - round 2
+ ldr $rk5q, [$cc, #80] @ load rk5
+
+ aese $ctr1b, $rk2 \n aesmc $ctr1b, $ctr1b @ AES block 1 - round 2
+ ldr $h2q, [$current_tag, #64] @ load h2l | h2h
+ ext $h2b, $h2b, $h2b, #8
+
+ aese $ctr3b, $rk2 \n aesmc $ctr3b, $ctr3b @ AES block 3 - round 2
+
+ aese $ctr2b, $rk2 \n aesmc $ctr2b, $ctr2b @ AES block 2 - round 2
+ eor $h34k.16b, $h34k.16b, $acc_h.16b @ h4k | h3k
+
+ aese $ctr0b, $rk3 \n aesmc $ctr0b, $ctr0b @ AES block 0 - round 3
+
+ aese $ctr1b, $rk3 \n aesmc $ctr1b, $ctr1b @ AES block 1 - round 3
+
+ aese $ctr2b, $rk3 \n aesmc $ctr2b, $ctr2b @ AES block 2 - round 3
+ ldr $rk4q, [$cc, #64] @ load rk4
+
+ aese $ctr3b, $rk3 \n aesmc $ctr3b, $ctr3b @ AES block 3 - round 3
+
+ and $main_end_input_ptr, $main_end_input_ptr, #0xffffffffffffffc0 @ number of bytes to be processed in main loop (at least 1 byte must be handled by tail)
+ trn2 $h12k.2d, $h1.2d, $h2.2d @ h2l | h1l
+
+ aese $ctr3b, $rk4 \n aesmc $ctr3b, $ctr3b @ AES block 3 - round 4
+ add $main_end_input_ptr, $main_end_input_ptr, $input_ptr
+
+ aese $ctr2b, $rk4 \n aesmc $ctr2b, $ctr2b @ AES block 2 - round 4
+ cmp $input_ptr, $main_end_input_ptr @ check if we have <= 4 blocks
+
+ aese $ctr0b, $rk4 \n aesmc $ctr0b, $ctr0b @ AES block 0 - round 4
+
+ aese $ctr3b, $rk5 \n aesmc $ctr3b, $ctr3b @ AES block 3 - round 5
+
+ aese $ctr2b, $rk5 \n aesmc $ctr2b, $ctr2b @ AES block 2 - round 5
+
+ aese $ctr0b, $rk5 \n aesmc $ctr0b, $ctr0b @ AES block 0 - round 5
+
+ aese $ctr3b, $rk6 \n aesmc $ctr3b, $ctr3b @ AES block 3 - round 6
+
+ aese $ctr1b, $rk4 \n aesmc $ctr1b, $ctr1b @ AES block 1 - round 4
+
+ aese $ctr2b, $rk6 \n aesmc $ctr2b, $ctr2b @ AES block 2 - round 6
+ trn1 $t0.2d, $h1.2d, $h2.2d @ h2h | h1h
+
+ aese $ctr0b, $rk6 \n aesmc $ctr0b, $ctr0b @ AES block 0 - round 6
+
+ aese $ctr1b, $rk5 \n aesmc $ctr1b, $ctr1b @ AES block 1 - round 5
+
+ aese $ctr3b, $rk7 \n aesmc $ctr3b, $ctr3b @ AES block 3 - round 7
+
+ aese $ctr0b, $rk7 \n aesmc $ctr0b, $ctr0b @ AES block 0 - round 7
+
+ aese $ctr1b, $rk6 \n aesmc $ctr1b, $ctr1b @ AES block 1 - round 6
+
+ aese $ctr2b, $rk7 \n aesmc $ctr2b, $ctr2b @ AES block 2 - round 7
+
+ aese $ctr0b, $rk8 \n aesmc $ctr0b, $ctr0b @ AES block 0 - round 8
+
+ aese $ctr1b, $rk7 \n aesmc $ctr1b, $ctr1b @ AES block 1 - round 7
+
+ aese $ctr2b, $rk8 \n aesmc $ctr2b, $ctr2b @ AES block 2 - round 8
+
+ aese $ctr3b, $rk8 \n aesmc $ctr3b, $ctr3b @ AES block 3 - round 8
+
+ aese $ctr1b, $rk8 \n aesmc $ctr1b, $ctr1b @ AES block 1 - round 8
+
+ aese $ctr2b, $rk9 @ AES block 2 - round 9
+
+ aese $ctr0b, $rk9 @ AES block 0 - round 9
+
+ eor $h12k.16b, $h12k.16b, $t0.16b @ h2k | h1k
+
+ aese $ctr1b, $rk9 @ AES block 1 - round 9
+
+ aese $ctr3b, $rk9 @ AES block 3 - round 9
+ b.ge .L128_enc_tail @ handle tail
+
+ ldp $input_l0, $input_h0, [$input_ptr, #0] @ AES block 0 - load plaintext
+
+ ldp $input_l2, $input_h2, [$input_ptr, #32] @ AES block 2 - load plaintext
+
+ ldp $input_l1, $input_h1, [$input_ptr, #16] @ AES block 1 - load plaintext
+
+ ldp $input_l3, $input_h3, [$input_ptr, #48] @ AES block 3 - load plaintext
+
+ eor $input_l0, $input_l0, $rk10_l @ AES block 0 - round 10 low
+ eor $input_h0, $input_h0, $rk10_h @ AES block 0 - round 10 high
+
+ eor $input_l2, $input_l2, $rk10_l @ AES block 2 - round 10 low
+ fmov $ctr_t0d, $input_l0 @ AES block 0 - mov low
+
+ eor $input_l1, $input_l1, $rk10_l @ AES block 1 - round 10 low
+ eor $input_h2, $input_h2, $rk10_h @ AES block 2 - round 10 high
+ fmov $ctr_t0.d[1], $input_h0 @ AES block 0 - mov high
+
+ fmov $ctr_t1d, $input_l1 @ AES block 1 - mov low
+ eor $input_h1, $input_h1, $rk10_h @ AES block 1 - round 10 high
+
+ eor $input_l3, $input_l3, $rk10_l @ AES block 3 - round 10 low
+ fmov $ctr_t1.d[1], $input_h1 @ AES block 1 - mov high
+
+ fmov $ctr_t2d, $input_l2 @ AES block 2 - mov low
+ eor $input_h3, $input_h3, $rk10_h @ AES block 3 - round 10 high
+ rev $ctr32w, $rctr32w @ CTR block 4
+
+ fmov $ctr_t2.d[1], $input_h2 @ AES block 2 - mov high
+ orr $ctr32x, $ctr96_t32x, $ctr32x, lsl #32 @ CTR block 4
+
+ eor $res0b, $ctr_t0b, $ctr0b @ AES block 0 - result
+ fmov $ctr0d, $ctr96_b64x @ CTR block 4
+ add $rctr32w, $rctr32w, #1 @ CTR block 4
+
+ fmov $ctr0.d[1], $ctr32x @ CTR block 4
+ rev $ctr32w, $rctr32w @ CTR block 5
+
+ eor $res1b, $ctr_t1b, $ctr1b @ AES block 1 - result
+ fmov $ctr1d, $ctr96_b64x @ CTR block 5
+ orr $ctr32x, $ctr96_t32x, $ctr32x, lsl #32 @ CTR block 5
+
+ add $rctr32w, $rctr32w, #1 @ CTR block 5
+ add $input_ptr, $input_ptr, #64 @ AES input_ptr update
+ fmov $ctr1.d[1], $ctr32x @ CTR block 5
+
+ fmov $ctr_t3d, $input_l3 @ AES block 3 - mov low
+ rev $ctr32w, $rctr32w @ CTR block 6
+ st1 { $res0b}, [$output_ptr], #16 @ AES block 0 - store result
+
+ fmov $ctr_t3.d[1], $input_h3 @ AES block 3 - mov high
+ orr $ctr32x, $ctr96_t32x, $ctr32x, lsl #32 @ CTR block 6
+
+ add $rctr32w, $rctr32w, #1 @ CTR block 6
+ eor $res2b, $ctr_t2b, $ctr2b @ AES block 2 - result
+ st1 { $res1b}, [$output_ptr], #16 @ AES block 1 - store result
+
+ fmov $ctr2d, $ctr96_b64x @ CTR block 6
+ cmp $input_ptr, $main_end_input_ptr @ check if we have <= 8 blocks
+
+ fmov $ctr2.d[1], $ctr32x @ CTR block 6
+ rev $ctr32w, $rctr32w @ CTR block 7
+ st1 { $res2b}, [$output_ptr], #16 @ AES block 2 - store result
+
+ orr $ctr32x, $ctr96_t32x, $ctr32x, lsl #32 @ CTR block 7
+
+ eor $res3b, $ctr_t3b, $ctr3b @ AES block 3 - result
+ st1 { $res3b}, [$output_ptr], #16 @ AES block 3 - store result
+ b.ge .L128_enc_prepretail @ do prepretail
+
+ .L128_enc_main_loop: @ main loop start
+ ldp $input_l3, $input_h3, [$input_ptr, #48] @ AES block 4k+3 - load plaintext
+ rev64 $res0b, $res0b @ GHASH block 4k (only t0 is free)
+ rev64 $res2b, $res2b @ GHASH block 4k+2 (t0, t1, and t2 free)
+
+ aese $ctr2b, $rk0 \n aesmc $ctr2b, $ctr2b @ AES block 4k+6 - round 0
+ fmov $ctr3d, $ctr96_b64x @ CTR block 4k+3
+
+ ext $acc_lb, $acc_lb, $acc_lb, #8 @ PRE 0
+ rev64 $res1b, $res1b @ GHASH block 4k+1 (t0 and t1 free)
+
+ aese $ctr1b, $rk0 \n aesmc $ctr1b, $ctr1b @ AES block 4k+5 - round 0
+ add $rctr32w, $rctr32w, #1 @ CTR block 4k+3
+ fmov $ctr3.d[1], $ctr32x @ CTR block 4k+3
+
+ aese $ctr0b, $rk0 \n aesmc $ctr0b, $ctr0b @ AES block 4k+4 - round 0
+ mov $t6d, $res2.d[1] @ GHASH block 4k+2 - mid
+
+ aese $ctr2b, $rk1 \n aesmc $ctr2b, $ctr2b @ AES block 4k+6 - round 1
+ mov $t3d, $res1.d[1] @ GHASH block 4k+1 - mid
+
+ aese $ctr1b, $rk1 \n aesmc $ctr1b, $ctr1b @ AES block 4k+5 - round 1
+ eor $res0b, $res0b, $acc_lb @ PRE 1
+
+ aese $ctr3b, $rk0 \n aesmc $ctr3b, $ctr3b @ AES block 4k+7 - round 0
+ eor $input_h3, $input_h3, $rk10_h @ AES block 4k+3 - round 10 high
+
+ pmull2 $t1.1q, $res1.2d, $h3.2d @ GHASH block 4k+1 - high
+ eor $t6.8b, $t6.8b, $res2.8b @ GHASH block 4k+2 - mid
+ ldp $input_l0, $input_h0, [$input_ptr, #0] @ AES block 4k+4 - load plaintext
+
+ aese $ctr0b, $rk1 \n aesmc $ctr0b, $ctr0b @ AES block 4k+4 - round 1
+ rev $ctr32w, $rctr32w @ CTR block 4k+8
+
+ eor $t3.8b, $t3.8b, $res1.8b @ GHASH block 4k+1 - mid
+ mov $t0d, $res0.d[1] @ GHASH block 4k - mid
+ orr $ctr32x, $ctr96_t32x, $ctr32x, lsl #32 @ CTR block 4k+8
+
+ pmull2 $acc_h.1q, $res0.2d, $h4.2d @ GHASH block 4k - high
+ add $rctr32w, $rctr32w, #1 @ CTR block 4k+8
+ mov $acc_md, $h34k.d[1] @ GHASH block 4k - mid
+
+ aese $ctr0b, $rk2 \n aesmc $ctr0b, $ctr0b @ AES block 4k+4 - round 2
+
+ pmull $acc_l.1q, $res0.1d, $h4.1d @ GHASH block 4k - low
+ eor $t0.8b, $t0.8b, $res0.8b @ GHASH block 4k - mid
+
+ aese $ctr1b, $rk2 \n aesmc $ctr1b, $ctr1b @ AES block 4k+5 - round 2
+
+ aese $ctr0b, $rk3 \n aesmc $ctr0b, $ctr0b @ AES block 4k+4 - round 3
+ eor $acc_hb, $acc_hb, $t1.16b @ GHASH block 4k+1 - high
+
+ pmull $t5.1q, $res2.1d, $h2.1d @ GHASH block 4k+2 - low
+
+ pmull $acc_m.1q, $t0.1d, $acc_m.1d @ GHASH block 4k - mid
+ rev64 $res3b, $res3b @ GHASH block 4k+3 (t0, t1, t2 and t3 free)
+
+ pmull $t3.1q, $t3.1d, $h34k.1d @ GHASH block 4k+1 - mid
+
+ pmull $t2.1q, $res1.1d, $h3.1d @ GHASH block 4k+1 - low
+ ins $t6.d[1], $t6.d[0] @ GHASH block 4k+2 - mid
+
+ pmull2 $t4.1q, $res2.2d, $h2.2d @ GHASH block 4k+2 - high
+ eor $input_h0, $input_h0, $rk10_h @ AES block 4k+4 - round 10 high
+
+ eor $acc_mb, $acc_mb, $t3.16b @ GHASH block 4k+1 - mid
+ mov $t9d, $res3.d[1] @ GHASH block 4k+3 - mid
+
+ aese $ctr3b, $rk1 \n aesmc $ctr3b, $ctr3b @ AES block 4k+7 - round 1
+ eor $acc_lb, $acc_lb, $t2.16b @ GHASH block 4k+1 - low
+
+ aese $ctr2b, $rk2 \n aesmc $ctr2b, $ctr2b @ AES block 4k+6 - round 2
+ eor $input_l0, $input_l0, $rk10_l @ AES block 4k+4 - round 10 low
+
+ aese $ctr1b, $rk3 \n aesmc $ctr1b, $ctr1b @ AES block 4k+5 - round 3
+ eor $t9.8b, $t9.8b, $res3.8b @ GHASH block 4k+3 - mid
+
+ pmull2 $t7.1q, $res3.2d, $h1.2d @ GHASH block 4k+3 - high
+
+ aese $ctr2b, $rk3 \n aesmc $ctr2b, $ctr2b @ AES block 4k+6 - round 3
+ eor $acc_hb, $acc_hb, $t4.16b @ GHASH block 4k+2 - high
+
+ pmull2 $t6.1q, $t6.2d, $h12k.2d @ GHASH block 4k+2 - mid
+
+ pmull $t8.1q, $res3.1d, $h1.1d @ GHASH block 4k+3 - low
+ movi $mod_constant.8b, #0xc2
+
+ pmull $t9.1q, $t9.1d, $h12k.1d @ GHASH block 4k+3 - mid
+ eor $acc_lb, $acc_lb, $t5.16b @ GHASH block 4k+2 - low
+
+ aese $ctr1b, $rk4 \n aesmc $ctr1b, $ctr1b @ AES block 4k+5 - round 4
+
+ aese $ctr3b, $rk2 \n aesmc $ctr3b, $ctr3b @ AES block 4k+7 - round 2
+ shl $mod_constantd, $mod_constantd, #56 @ mod_constant
+
+ aese $ctr0b, $rk4 \n aesmc $ctr0b, $ctr0b @ AES block 4k+4 - round 4
+ eor $acc_hb, $acc_hb, $t7.16b @ GHASH block 4k+3 - high
+
+ aese $ctr1b, $rk5 \n aesmc $ctr1b, $ctr1b @ AES block 4k+5 - round 5
+ ldp $input_l1, $input_h1, [$input_ptr, #16] @ AES block 4k+5 - load plaintext
+
+ aese $ctr3b, $rk3 \n aesmc $ctr3b, $ctr3b @ AES block 4k+7 - round 3
+ eor $acc_mb, $acc_mb, $t6.16b @ GHASH block 4k+2 - mid
+
+ aese $ctr0b, $rk5 \n aesmc $ctr0b, $ctr0b @ AES block 4k+4 - round 5
+ ldp $input_l2, $input_h2, [$input_ptr, #32] @ AES block 4k+6 - load plaintext
+
+ pmull $mod_t.1q, $acc_h.1d, $mod_constant.1d @ MODULO - top 64b align with mid
+ eor $acc_lb, $acc_lb, $t8.16b @ GHASH block 4k+3 - low
+
+ aese $ctr2b, $rk4 \n aesmc $ctr2b, $ctr2b @ AES block 4k+6 - round 4
+ eor $input_l1, $input_l1, $rk10_l @ AES block 4k+5 - round 10 low
+
+ aese $ctr3b, $rk4 \n aesmc $ctr3b, $ctr3b @ AES block 4k+7 - round 4
+ eor $acc_mb, $acc_mb, $t9.16b @ GHASH block 4k+3 - mid
+
+ aese $ctr1b, $rk6 \n aesmc $ctr1b, $ctr1b @ AES block 4k+5 - round 6
+ eor $input_l3, $input_l3, $rk10_l @ AES block 4k+3 - round 10 low
+
+ aese $ctr2b, $rk5 \n aesmc $ctr2b, $ctr2b @ AES block 4k+6 - round 5
+ eor $t9.16b, $acc_lb, $acc_hb @ MODULO - karatsuba tidy up
+
+ fmov $ctr_t0d, $input_l0 @ AES block 4k+4 - mov low
+ aese $ctr0b, $rk6 \n aesmc $ctr0b, $ctr0b @ AES block 4k+4 - round 6
+ fmov $ctr_t0.d[1], $input_h0 @ AES block 4k+4 - mov high
+
+ add $input_ptr, $input_ptr, #64 @ AES input_ptr update
+ fmov $ctr_t3d, $input_l3 @ AES block 4k+3 - mov low
+ ext $acc_hb, $acc_hb, $acc_hb, #8 @ MODULO - other top alignment
+
+ aese $ctr3b, $rk5 \n aesmc $ctr3b, $ctr3b @ AES block 4k+7 - round 5
+ fmov $ctr_t1d, $input_l1 @ AES block 4k+5 - mov low
+
+ aese $ctr0b, $rk7 \n aesmc $ctr0b, $ctr0b @ AES block 4k+4 - round 7
+ eor $acc_mb, $acc_mb, $t9.16b @ MODULO - karatsuba tidy up
+
+ aese $ctr2b, $rk6 \n aesmc $ctr2b, $ctr2b @ AES block 4k+6 - round 6
+ eor $input_h1, $input_h1, $rk10_h @ AES block 4k+5 - round 10 high
+
+ aese $ctr1b, $rk7 \n aesmc $ctr1b, $ctr1b @ AES block 4k+5 - round 7
+ fmov $ctr_t1.d[1], $input_h1 @ AES block 4k+5 - mov high
+
+ aese $ctr0b, $rk8 \n aesmc $ctr0b, $ctr0b @ AES block 4k+4 - round 8
+ fmov $ctr_t3.d[1], $input_h3 @ AES block 4k+3 - mov high
+
+ aese $ctr3b, $rk6 \n aesmc $ctr3b, $ctr3b @ AES block 4k+7 - round 6
+ cmp $input_ptr, $main_end_input_ptr @ LOOP CONTROL
+
+ aese $ctr1b, $rk8 \n aesmc $ctr1b, $ctr1b @ AES block 4k+5 - round 8
+ eor $acc_mb, $acc_mb, $mod_t.16b @ MODULO - fold into mid
+
+ aese $ctr0b, $rk9 @ AES block 4k+4 - round 9
+ eor $input_l2, $input_l2, $rk10_l @ AES block 4k+6 - round 10 low
+ eor $input_h2, $input_h2, $rk10_h @ AES block 4k+6 - round 10 high
+
+ aese $ctr3b, $rk7 \n aesmc $ctr3b, $ctr3b @ AES block 4k+7 - round 7
+ fmov $ctr_t2d, $input_l2 @ AES block 4k+6 - mov low
+
+ aese $ctr1b, $rk9 @ AES block 4k+5 - round 9
+ fmov $ctr_t2.d[1], $input_h2 @ AES block 4k+6 - mov high
+
+ aese $ctr2b, $rk7 \n aesmc $ctr2b, $ctr2b @ AES block 4k+6 - round 7
+ eor $res0b, $ctr_t0b, $ctr0b @ AES block 4k+4 - result
+
+ fmov $ctr0d, $ctr96_b64x @ CTR block 4k+8
+ aese $ctr3b, $rk8 \n aesmc $ctr3b, $ctr3b @ AES block 4k+7 - round 8
+
+ fmov $ctr0.d[1], $ctr32x @ CTR block 4k+8
+ rev $ctr32w, $rctr32w @ CTR block 4k+9
+ eor $acc_mb, $acc_mb, $acc_hb @ MODULO - fold into mid
+
+ aese $ctr2b, $rk8 \n aesmc $ctr2b, $ctr2b @ AES block 4k+6 - round 8
+ eor $res1b, $ctr_t1b, $ctr1b @ AES block 4k+5 - result
+
+ add $rctr32w, $rctr32w, #1 @ CTR block 4k+9
+ orr $ctr32x, $ctr96_t32x, $ctr32x, lsl #32 @ CTR block 4k+9
+ fmov $ctr1d, $ctr96_b64x @ CTR block 4k+9
+
+ pmull $acc_h.1q, $acc_m.1d, $mod_constant.1d @ MODULO - mid 64b align with low
+ fmov $ctr1.d[1], $ctr32x @ CTR block 4k+9
+ rev $ctr32w, $rctr32w @ CTR block 4k+10
+
+ aese $ctr2b, $rk9 @ AES block 4k+6 - round 9
+ st1 { $res0b}, [$output_ptr], #16 @ AES block 4k+4 - store result
+ eor $res2b, $ctr_t2b, $ctr2b @ AES block 4k+6 - result
+ orr $ctr32x, $ctr96_t32x, $ctr32x, lsl #32 @ CTR block 4k+10
+
+ aese $ctr3b, $rk9 @ AES block 4k+7 - round 9
+ add $rctr32w, $rctr32w, #1 @ CTR block 4k+10
+ ext $acc_mb, $acc_mb, $acc_mb, #8 @ MODULO - other mid alignment
+ fmov $ctr2d, $ctr96_b64x @ CTR block 4k+10
+
+ eor $acc_lb, $acc_lb, $acc_hb @ MODULO - fold into low
+ st1 { $res1b}, [$output_ptr], #16 @ AES block 4k+5 - store result
+
+ fmov $ctr2.d[1], $ctr32x @ CTR block 4k+10
+ st1 { $res2b}, [$output_ptr], #16 @ AES block 4k+6 - store result
+ rev $ctr32w, $rctr32w @ CTR block 4k+11
+
+ orr $ctr32x, $ctr96_t32x, $ctr32x, lsl #32 @ CTR block 4k+11
+ eor $res3b, $ctr_t3b, $ctr3b @ AES block 4k+3 - result
+
+ eor $acc_lb, $acc_lb, $acc_mb @ MODULO - fold into low
+ st1 { $res3b}, [$output_ptr], #16 @ AES block 4k+3 - store result
+ b.lt .L128_enc_main_loop
+
+ .L128_enc_prepretail: @ PREPRETAIL
+ rev64 $res0b, $res0b @ GHASH block 4k (only t0 is free)
+ fmov $ctr3d, $ctr96_b64x @ CTR block 4k+3
+ rev64 $res1b, $res1b @ GHASH block 4k+1 (t0 and t1 free)
+
+ ext $acc_lb, $acc_lb, $acc_lb, #8 @ PRE 0
+ add $rctr32w, $rctr32w, #1 @ CTR block 4k+3
+ fmov $ctr3.d[1], $ctr32x @ CTR block 4k+3
+
+ aese $ctr1b, $rk0 \n aesmc $ctr1b, $ctr1b @ AES block 4k+5 - round 0
+ rev64 $res2b, $res2b @ GHASH block 4k+2 (t0, t1, and t2 free)
+
+ pmull $t2.1q, $res1.1d, $h3.1d @ GHASH block 4k+1 - low
+
+ rev64 $res3b, $res3b @ GHASH block 4k+3 (t0, t1, t2 and t3 free)
+ eor $res0b, $res0b, $acc_lb @ PRE 1
+
+ pmull2 $t1.1q, $res1.2d, $h3.2d @ GHASH block 4k+1 - high
+
+ aese $ctr3b, $rk0 \n aesmc $ctr3b, $ctr3b @ AES block 4k+7 - round 0
+ mov $t3d, $res1.d[1] @ GHASH block 4k+1 - mid
+
+ pmull $acc_l.1q, $res0.1d, $h4.1d @ GHASH block 4k - low
+ mov $t0d, $res0.d[1] @ GHASH block 4k - mid
+
+ mov $t6d, $res2.d[1] @ GHASH block 4k+2 - mid
+ mov $acc_md, $h34k.d[1] @ GHASH block 4k - mid
+
+ aese $ctr1b, $rk1 \n aesmc $ctr1b, $ctr1b @ AES block 4k+5 - round 1
+ eor $t3.8b, $t3.8b, $res1.8b @ GHASH block 4k+1 - mid
+
+ eor $t0.8b, $t0.8b, $res0.8b @ GHASH block 4k - mid
+
+ pmull2 $acc_h.1q, $res0.2d, $h4.2d @ GHASH block 4k - high
+ eor $t6.8b, $t6.8b, $res2.8b @ GHASH block 4k+2 - mid
+
+ aese $ctr3b, $rk1 \n aesmc $ctr3b, $ctr3b @ AES block 4k+7 - round 1
+
+ pmull $t3.1q, $t3.1d, $h34k.1d @ GHASH block 4k+1 - mid
+ eor $acc_lb, $acc_lb, $t2.16b @ GHASH block 4k+1 - low
+
+ pmull $acc_m.1q, $t0.1d, $acc_m.1d @ GHASH block 4k - mid
+
+ aese $ctr0b, $rk0 \n aesmc $ctr0b, $ctr0b @ AES block 4k+4 - round 0
+ ins $t6.d[1], $t6.d[0] @ GHASH block 4k+2 - mid
+
+ aese $ctr2b, $rk0 \n aesmc $ctr2b, $ctr2b @ AES block 4k+6 - round 0
+
+ eor $acc_mb, $acc_mb, $t3.16b @ GHASH block 4k+1 - mid
+ mov $t9d, $res3.d[1] @ GHASH block 4k+3 - mid
+
+ aese $ctr0b, $rk1 \n aesmc $ctr0b, $ctr0b @ AES block 4k+4 - round 1
+ eor $acc_hb, $acc_hb, $t1.16b @ GHASH block 4k+1 - high
+
+ pmull2 $t6.1q, $t6.2d, $h12k.2d @ GHASH block 4k+2 - mid
+
+ pmull2 $t4.1q, $res2.2d, $h2.2d @ GHASH block 4k+2 - high
+ eor $t9.8b, $t9.8b, $res3.8b @ GHASH block 4k+3 - mid
+
+ pmull2 $t7.1q, $res