[tip:x86/debug] x86/asm/crypto: Create stack frames in crypto functions

From: tip-bot for Josh Poimboeuf
Date: Thu Feb 25 2016 - 00:51:23 EST


Commit-ID: 8691ccd764f9ecc69a6812dfe76214c86ac9ba06
Gitweb: http://git.kernel.org/tip/8691ccd764f9ecc69a6812dfe76214c86ac9ba06
Author: Josh Poimboeuf <jpoimboe@xxxxxxxxxx>
AuthorDate: Thu, 21 Jan 2016 16:49:19 -0600
Committer: Ingo Molnar <mingo@xxxxxxxxxx>
CommitDate: Wed, 24 Feb 2016 08:35:43 +0100

x86/asm/crypto: Create stack frames in crypto functions

The crypto code has several callable non-leaf assembly functions that
don't honor CONFIG_FRAME_POINTER, which can result in bad stack traces.

Create stack frames for them when CONFIG_FRAME_POINTER is enabled.
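
With CONFIG_FRAME_POINTER enabled, the FRAME_BEGIN/FRAME_END macros from
<asm/frame.h> expand roughly as sketched below (paraphrased for
illustration; see arch/x86/include/asm/frame.h in this series for the
authoritative definitions):

    #ifdef CONFIG_FRAME_POINTER

    .macro FRAME_BEGIN
        push %_ASM_BP                   # save the caller's frame pointer
        _ASM_MOV %_ASM_SP, %_ASM_BP     # start a frame for this function
    .endm

    .macro FRAME_END
        pop %_ASM_BP                    # restore the caller's frame pointer
    .endm

    # bytes the stack grows by under FRAME_BEGIN: 4 on 32-bit, 8 on 64-bit
    #define FRAME_OFFSET __ASM_SEL(4, 8)

    #else

    .macro FRAME_BEGIN
    .endm
    .macro FRAME_END
    .endm
    #define FRAME_OFFSET 0

    #endif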

Signed-off-by: Josh Poimboeuf <jpoimboe@xxxxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Andy Lutomirski <luto@xxxxxxxxxxxxxx>
Cc: Andy Lutomirski <luto@xxxxxxxxxx>
Cc: Arnaldo Carvalho de Melo <acme@xxxxxxxxxx>
Cc: Bernd Petrovitsch <bernd@xxxxxxxxxxxxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxxxx>
Cc: Brian Gerst <brgerst@xxxxxxxxx>
Cc: Chris J Arges <chris.j.arges@xxxxxxxxxxxxx>
Cc: David S. Miller <davem@xxxxxxxxxxxxx>
Cc: Denys Vlasenko <dvlasenk@xxxxxxxxxx>
Cc: H. Peter Anvin <hpa@xxxxxxxxx>
Cc: Herbert Xu <herbert@xxxxxxxxxxxxxxxxxxx>
Cc: Jiri Slaby <jslaby@xxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Michal Marek <mmarek@xxxxxxx>
Cc: Namhyung Kim <namhyung@xxxxxxxxx>
Cc: Pedro Alves <palves@xxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: live-patching@xxxxxxxxxxxxxxx
Link: http://lkml.kernel.org/r/6c20192bcf1102ae18ae5a242cabf30ce9b29895.1453405861.git.jpoimboe@xxxxxxxxxx
Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
---
 arch/x86/crypto/aesni-intel_asm.S                | 73 +++++++++++++++---------
 arch/x86/crypto/camellia-aesni-avx-asm_64.S      | 15 +++++
 arch/x86/crypto/camellia-aesni-avx2-asm_64.S     | 15 +++++
 arch/x86/crypto/cast5-avx-x86_64-asm_64.S        |  9 +++
 arch/x86/crypto/cast6-avx-x86_64-asm_64.S        | 13 +++++
 arch/x86/crypto/ghash-clmulni-intel_asm.S        |  5 ++
 arch/x86/crypto/serpent-avx-x86_64-asm_64.S      | 13 +++++
 arch/x86/crypto/serpent-avx2-asm_64.S            | 13 +++++
 arch/x86/crypto/sha-mb/sha1_mb_mgr_flush_avx2.S  |  3 +
 arch/x86/crypto/sha-mb/sha1_mb_mgr_submit_avx2.S |  3 +
 arch/x86/crypto/twofish-avx-x86_64-asm_64.S      | 13 +++++
11 files changed, 148 insertions(+), 27 deletions(-)
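
In the 32-bit paths below (the #ifndef __x86_64__ blocks in
aesni-intel_asm.S), arguments are fetched from the stack, so each offset
grows by FRAME_OFFSET to account for the frame pointer that FRAME_BEGIN
may have pushed.  A self-contained sketch of that pattern follows; the
function name and the KEYP alias are hypothetical, for illustration only:

    #include <linux/linkage.h>
    #include <asm/frame.h>

    #define KEYP %eax                   # illustrative scratch register

    # hypothetical 32-bit function that fetches its first stack argument
    ENTRY(frame_offset_demo)
        FRAME_BEGIN                     # push %ebp; mov %esp, %ebp (if enabled)
        pushl KEYP                      # one more 4-byte slot on the stack
        # arg1 sat at 4(%esp) on entry; our pushl moves it to 8(%esp), and
        # FRAME_BEGIN's push adds FRAME_OFFSET more bytes when frame
        # pointers are enabled -- hence (FRAME_OFFSET+8)(%esp).
        movl (FRAME_OFFSET+8)(%esp), KEYP
        movl KEYP, %edx                 # hand the value off before restoring
        popl KEYP                       # restore the saved register
        FRAME_END
        ret
    ENDPROC(frame_offset_demo)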

diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index c44cfed..383a6f8 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -31,6 +31,7 @@

#include <linux/linkage.h>
#include <asm/inst.h>
+#include <asm/frame.h>

/*
* The following macros are used to move an (un)aligned 16 byte value to/from
@@ -1800,11 +1801,12 @@ ENDPROC(_key_expansion_256b)
* unsigned int key_len)
*/
ENTRY(aesni_set_key)
+ FRAME_BEGIN
#ifndef __x86_64__
pushl KEYP
- movl 8(%esp), KEYP # ctx
- movl 12(%esp), UKEYP # in_key
- movl 16(%esp), %edx # key_len
+ movl (FRAME_OFFSET+8)(%esp), KEYP # ctx
+ movl (FRAME_OFFSET+12)(%esp), UKEYP # in_key
+ movl (FRAME_OFFSET+16)(%esp), %edx # key_len
#endif
movups (UKEYP), %xmm0 # user key (first 16 bytes)
movaps %xmm0, (KEYP)
@@ -1905,6 +1907,7 @@ ENTRY(aesni_set_key)
#ifndef __x86_64__
popl KEYP
#endif
+ FRAME_END
ret
ENDPROC(aesni_set_key)

@@ -1912,12 +1915,13 @@ ENDPROC(aesni_set_key)
* void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
*/
ENTRY(aesni_enc)
+ FRAME_BEGIN
#ifndef __x86_64__
pushl KEYP
pushl KLEN
- movl 12(%esp), KEYP
- movl 16(%esp), OUTP
- movl 20(%esp), INP
+ movl (FRAME_OFFSET+12)(%esp), KEYP # ctx
+ movl (FRAME_OFFSET+16)(%esp), OUTP # dst
+ movl (FRAME_OFFSET+20)(%esp), INP # src
#endif
movl 480(KEYP), KLEN # key length
movups (INP), STATE # input
@@ -1927,6 +1931,7 @@ ENTRY(aesni_enc)
popl KLEN
popl KEYP
#endif
+ FRAME_END
ret
ENDPROC(aesni_enc)

@@ -2101,12 +2106,13 @@ ENDPROC(_aesni_enc4)
* void aesni_dec (struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
*/
ENTRY(aesni_dec)
+ FRAME_BEGIN
#ifndef __x86_64__
pushl KEYP
pushl KLEN
- movl 12(%esp), KEYP
- movl 16(%esp), OUTP
- movl 20(%esp), INP
+ movl (FRAME_OFFSET+12)(%esp), KEYP # ctx
+ movl (FRAME_OFFSET+16)(%esp), OUTP # dst
+ movl (FRAME_OFFSET+20)(%esp), INP # src
#endif
mov 480(KEYP), KLEN # key length
add $240, KEYP
@@ -2117,6 +2123,7 @@ ENTRY(aesni_dec)
popl KLEN
popl KEYP
#endif
+ FRAME_END
ret
ENDPROC(aesni_dec)

@@ -2292,14 +2299,15 @@ ENDPROC(_aesni_dec4)
* size_t len)
*/
ENTRY(aesni_ecb_enc)
+ FRAME_BEGIN
#ifndef __x86_64__
pushl LEN
pushl KEYP
pushl KLEN
- movl 16(%esp), KEYP
- movl 20(%esp), OUTP
- movl 24(%esp), INP
- movl 28(%esp), LEN
+ movl (FRAME_OFFSET+16)(%esp), KEYP # ctx
+ movl (FRAME_OFFSET+20)(%esp), OUTP # dst
+ movl (FRAME_OFFSET+24)(%esp), INP # src
+ movl (FRAME_OFFSET+28)(%esp), LEN # len
#endif
test LEN, LEN # check length
jz .Lecb_enc_ret
@@ -2342,6 +2350,7 @@ ENTRY(aesni_ecb_enc)
popl KEYP
popl LEN
#endif
+ FRAME_END
ret
ENDPROC(aesni_ecb_enc)

@@ -2350,14 +2359,15 @@ ENDPROC(aesni_ecb_enc)
* size_t len);
*/
ENTRY(aesni_ecb_dec)
+ FRAME_BEGIN
#ifndef __x86_64__
pushl LEN
pushl KEYP
pushl KLEN
- movl 16(%esp), KEYP
- movl 20(%esp), OUTP
- movl 24(%esp), INP
- movl 28(%esp), LEN
+ movl (FRAME_OFFSET+16)(%esp), KEYP # ctx
+ movl (FRAME_OFFSET+20)(%esp), OUTP # dst
+ movl (FRAME_OFFSET+24)(%esp), INP # src
+ movl (FRAME_OFFSET+28)(%esp), LEN # len
#endif
test LEN, LEN
jz .Lecb_dec_ret
@@ -2401,6 +2411,7 @@ ENTRY(aesni_ecb_dec)
popl KEYP
popl LEN
#endif
+ FRAME_END
ret
ENDPROC(aesni_ecb_dec)

@@ -2409,16 +2420,17 @@ ENDPROC(aesni_ecb_dec)
* size_t len, u8 *iv)
*/
ENTRY(aesni_cbc_enc)
+ FRAME_BEGIN
#ifndef __x86_64__
pushl IVP
pushl LEN
pushl KEYP
pushl KLEN
- movl 20(%esp), KEYP
- movl 24(%esp), OUTP
- movl 28(%esp), INP
- movl 32(%esp), LEN
- movl 36(%esp), IVP
+ movl (FRAME_OFFSET+20)(%esp), KEYP # ctx
+ movl (FRAME_OFFSET+24)(%esp), OUTP # dst
+ movl (FRAME_OFFSET+28)(%esp), INP # src
+ movl (FRAME_OFFSET+32)(%esp), LEN # len
+ movl (FRAME_OFFSET+36)(%esp), IVP # iv
#endif
cmp $16, LEN
jb .Lcbc_enc_ret
@@ -2443,6 +2455,7 @@ ENTRY(aesni_cbc_enc)
popl LEN
popl IVP
#endif
+ FRAME_END
ret
ENDPROC(aesni_cbc_enc)

@@ -2451,16 +2464,17 @@ ENDPROC(aesni_cbc_enc)
* size_t len, u8 *iv)
*/
ENTRY(aesni_cbc_dec)
+ FRAME_BEGIN
#ifndef __x86_64__
pushl IVP
pushl LEN
pushl KEYP
pushl KLEN
- movl 20(%esp), KEYP
- movl 24(%esp), OUTP
- movl 28(%esp), INP
- movl 32(%esp), LEN
- movl 36(%esp), IVP
+ movl (FRAME_OFFSET+20)(%esp), KEYP # ctx
+ movl (FRAME_OFFSET+24)(%esp), OUTP # dst
+ movl (FRAME_OFFSET+28)(%esp), INP # src
+ movl (FRAME_OFFSET+32)(%esp), LEN # len
+ movl (FRAME_OFFSET+36)(%esp), IVP # iv
#endif
cmp $16, LEN
jb .Lcbc_dec_just_ret
@@ -2534,6 +2548,7 @@ ENTRY(aesni_cbc_dec)
popl LEN
popl IVP
#endif
+ FRAME_END
ret
ENDPROC(aesni_cbc_dec)

@@ -2600,6 +2615,7 @@ ENDPROC(_aesni_inc)
* size_t len, u8 *iv)
*/
ENTRY(aesni_ctr_enc)
+ FRAME_BEGIN
cmp $16, LEN
jb .Lctr_enc_just_ret
mov 480(KEYP), KLEN
@@ -2653,6 +2669,7 @@ ENTRY(aesni_ctr_enc)
.Lctr_enc_ret:
movups IV, (IVP)
.Lctr_enc_just_ret:
+ FRAME_END
ret
ENDPROC(aesni_ctr_enc)

@@ -2679,6 +2696,7 @@ ENDPROC(aesni_ctr_enc)
* bool enc, u8 *iv)
*/
ENTRY(aesni_xts_crypt8)
+ FRAME_BEGIN
cmpb $0, %cl
movl $0, %ecx
movl $240, %r10d
@@ -2779,6 +2797,7 @@ ENTRY(aesni_xts_crypt8)
pxor INC, STATE4
movdqu STATE4, 0x70(OUTP)

+ FRAME_END
ret
ENDPROC(aesni_xts_crypt8)

diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
index ce71f92..aa9e8bd 100644
--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
@@ -16,6 +16,7 @@
*/

#include <linux/linkage.h>
+#include <asm/frame.h>

#define CAMELLIA_TABLE_BYTE_LEN 272

@@ -726,6 +727,7 @@ __camellia_enc_blk16:
* %xmm0..%xmm15: 16 encrypted blocks, order swapped:
* 7, 8, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8
*/
+ FRAME_BEGIN

leaq 8 * 16(%rax), %rcx;

@@ -780,6 +782,7 @@ __camellia_enc_blk16:
%xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
%xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));

+ FRAME_END
ret;

.align 8
@@ -812,6 +815,7 @@ __camellia_dec_blk16:
* %xmm0..%xmm15: 16 plaintext blocks, order swapped:
* 7, 8, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8
*/
+ FRAME_BEGIN

leaq 8 * 16(%rax), %rcx;

@@ -865,6 +869,7 @@ __camellia_dec_blk16:
%xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
%xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));

+ FRAME_END
ret;

.align 8
@@ -890,6 +895,7 @@ ENTRY(camellia_ecb_enc_16way)
* %rsi: dst (16 blocks)
* %rdx: src (16 blocks)
*/
+ FRAME_BEGIN

inpack16_pre(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
%xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
@@ -904,6 +910,7 @@ ENTRY(camellia_ecb_enc_16way)
%xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
%xmm8, %rsi);

+ FRAME_END
ret;
ENDPROC(camellia_ecb_enc_16way)

@@ -913,6 +920,7 @@ ENTRY(camellia_ecb_dec_16way)
* %rsi: dst (16 blocks)
* %rdx: src (16 blocks)
*/
+ FRAME_BEGIN

cmpl $16, key_length(CTX);
movl $32, %r8d;
@@ -932,6 +940,7 @@ ENTRY(camellia_ecb_dec_16way)
%xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
%xmm8, %rsi);

+ FRAME_END
ret;
ENDPROC(camellia_ecb_dec_16way)

@@ -941,6 +950,7 @@ ENTRY(camellia_cbc_dec_16way)
* %rsi: dst (16 blocks)
* %rdx: src (16 blocks)
*/
+ FRAME_BEGIN

cmpl $16, key_length(CTX);
movl $32, %r8d;
@@ -981,6 +991,7 @@ ENTRY(camellia_cbc_dec_16way)
%xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
%xmm8, %rsi);

+ FRAME_END
ret;
ENDPROC(camellia_cbc_dec_16way)

@@ -997,6 +1008,7 @@ ENTRY(camellia_ctr_16way)
* %rdx: src (16 blocks)
* %rcx: iv (little endian, 128bit)
*/
+ FRAME_BEGIN

subq $(16 * 16), %rsp;
movq %rsp, %rax;
@@ -1092,6 +1104,7 @@ ENTRY(camellia_ctr_16way)
%xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
%xmm8, %rsi);

+ FRAME_END
ret;
ENDPROC(camellia_ctr_16way)

@@ -1112,6 +1125,7 @@ camellia_xts_crypt_16way:
* %r8: index for input whitening key
* %r9: pointer to __camellia_enc_blk16 or __camellia_dec_blk16
*/
+ FRAME_BEGIN

subq $(16 * 16), %rsp;
movq %rsp, %rax;
@@ -1234,6 +1248,7 @@ camellia_xts_crypt_16way:
%xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
%xmm8, %rsi);

+ FRAME_END
ret;
ENDPROC(camellia_xts_crypt_16way)

diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
index 0e0b886..16186c1 100644
--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
@@ -11,6 +11,7 @@
*/

#include <linux/linkage.h>
+#include <asm/frame.h>

#define CAMELLIA_TABLE_BYTE_LEN 272

@@ -766,6 +767,7 @@ __camellia_enc_blk32:
* %ymm0..%ymm15: 32 encrypted blocks, order swapped:
* 7, 8, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8
*/
+ FRAME_BEGIN

leaq 8 * 32(%rax), %rcx;

@@ -820,6 +822,7 @@ __camellia_enc_blk32:
%ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
%ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));

+ FRAME_END
ret;

.align 8
@@ -852,6 +855,7 @@ __camellia_dec_blk32:
* %ymm0..%ymm15: 16 plaintext blocks, order swapped:
* 7, 8, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8
*/
+ FRAME_BEGIN

leaq 8 * 32(%rax), %rcx;

@@ -905,6 +909,7 @@ __camellia_dec_blk32:
%ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
%ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));

+ FRAME_END
ret;

.align 8
@@ -930,6 +935,7 @@ ENTRY(camellia_ecb_enc_32way)
* %rsi: dst (32 blocks)
* %rdx: src (32 blocks)
*/
+ FRAME_BEGIN

vzeroupper;

@@ -948,6 +954,7 @@ ENTRY(camellia_ecb_enc_32way)

vzeroupper;

+ FRAME_END
ret;
ENDPROC(camellia_ecb_enc_32way)

@@ -957,6 +964,7 @@ ENTRY(camellia_ecb_dec_32way)
* %rsi: dst (32 blocks)
* %rdx: src (32 blocks)
*/
+ FRAME_BEGIN

vzeroupper;

@@ -980,6 +988,7 @@ ENTRY(camellia_ecb_dec_32way)

vzeroupper;

+ FRAME_END
ret;
ENDPROC(camellia_ecb_dec_32way)

@@ -989,6 +998,7 @@ ENTRY(camellia_cbc_dec_32way)
* %rsi: dst (32 blocks)
* %rdx: src (32 blocks)
*/
+ FRAME_BEGIN

vzeroupper;

@@ -1046,6 +1056,7 @@ ENTRY(camellia_cbc_dec_32way)

vzeroupper;

+ FRAME_END
ret;
ENDPROC(camellia_cbc_dec_32way)

@@ -1070,6 +1081,7 @@ ENTRY(camellia_ctr_32way)
* %rdx: src (32 blocks)
* %rcx: iv (little endian, 128bit)
*/
+ FRAME_BEGIN

vzeroupper;

@@ -1184,6 +1196,7 @@ ENTRY(camellia_ctr_32way)

vzeroupper;

+ FRAME_END
ret;
ENDPROC(camellia_ctr_32way)

@@ -1216,6 +1229,7 @@ camellia_xts_crypt_32way:
* %r8: index for input whitening key
* %r9: pointer to __camellia_enc_blk32 or __camellia_dec_blk32
*/
+ FRAME_BEGIN

vzeroupper;

@@ -1349,6 +1363,7 @@ camellia_xts_crypt_32way:

vzeroupper;

+ FRAME_END
ret;
ENDPROC(camellia_xts_crypt_32way)

diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
index c35fd5d..14fa196 100644
--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
@@ -24,6 +24,7 @@
*/

#include <linux/linkage.h>
+#include <asm/frame.h>

.file "cast5-avx-x86_64-asm_64.S"

@@ -365,6 +366,7 @@ ENTRY(cast5_ecb_enc_16way)
* %rsi: dst
* %rdx: src
*/
+ FRAME_BEGIN

movq %rsi, %r11;

@@ -388,6 +390,7 @@ ENTRY(cast5_ecb_enc_16way)
vmovdqu RR4, (6*4*4)(%r11);
vmovdqu RL4, (7*4*4)(%r11);

+ FRAME_END
ret;
ENDPROC(cast5_ecb_enc_16way)

@@ -398,6 +401,7 @@ ENTRY(cast5_ecb_dec_16way)
* %rdx: src
*/

+ FRAME_BEGIN
movq %rsi, %r11;

vmovdqu (0*4*4)(%rdx), RL1;
@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
vmovdqu RR4, (6*4*4)(%r11);
vmovdqu RL4, (7*4*4)(%r11);

+ FRAME_END
ret;
ENDPROC(cast5_ecb_dec_16way)

@@ -429,6 +434,7 @@ ENTRY(cast5_cbc_dec_16way)
* %rsi: dst
* %rdx: src
*/
+ FRAME_BEGIN

pushq %r12;

@@ -469,6 +475,7 @@ ENTRY(cast5_cbc_dec_16way)

popq %r12;

+ FRAME_END
ret;
ENDPROC(cast5_cbc_dec_16way)

@@ -479,6 +486,7 @@ ENTRY(cast5_ctr_16way)
* %rdx: src
* %rcx: iv (big endian, 64bit)
*/
+ FRAME_BEGIN

pushq %r12;

@@ -542,5 +550,6 @@ ENTRY(cast5_ctr_16way)

popq %r12;

+ FRAME_END
ret;
ENDPROC(cast5_ctr_16way)
diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
index e3531f8..c419389 100644
--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
@@ -24,6 +24,7 @@
*/

#include <linux/linkage.h>
+#include <asm/frame.h>
#include "glue_helper-asm-avx.S"

.file "cast6-avx-x86_64-asm_64.S"
@@ -349,6 +350,7 @@ ENTRY(cast6_ecb_enc_8way)
* %rsi: dst
* %rdx: src
*/
+ FRAME_BEGIN

movq %rsi, %r11;

@@ -358,6 +360,7 @@ ENTRY(cast6_ecb_enc_8way)

store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

+ FRAME_END
ret;
ENDPROC(cast6_ecb_enc_8way)

@@ -367,6 +370,7 @@ ENTRY(cast6_ecb_dec_8way)
* %rsi: dst
* %rdx: src
*/
+ FRAME_BEGIN

movq %rsi, %r11;

@@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)

store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

+ FRAME_END
ret;
ENDPROC(cast6_ecb_dec_8way)

@@ -385,6 +390,7 @@ ENTRY(cast6_cbc_dec_8way)
* %rsi: dst
* %rdx: src
*/
+ FRAME_BEGIN

pushq %r12;

@@ -399,6 +405,7 @@ ENTRY(cast6_cbc_dec_8way)

popq %r12;

+ FRAME_END
ret;
ENDPROC(cast6_cbc_dec_8way)

@@ -409,6 +416,7 @@ ENTRY(cast6_ctr_8way)
* %rdx: src
* %rcx: iv (little endian, 128bit)
*/
+ FRAME_BEGIN

pushq %r12;

@@ -424,6 +432,7 @@ ENTRY(cast6_ctr_8way)

popq %r12;

+ FRAME_END
ret;
ENDPROC(cast6_ctr_8way)

@@ -434,6 +443,7 @@ ENTRY(cast6_xts_enc_8way)
* %rdx: src
* %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
*/
+ FRAME_BEGIN

movq %rsi, %r11;

@@ -446,6 +456,7 @@ ENTRY(cast6_xts_enc_8way)
/* dst <= regs xor IVs(in dst) */
store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

+ FRAME_END
ret;
ENDPROC(cast6_xts_enc_8way)

@@ -456,6 +467,7 @@ ENTRY(cast6_xts_dec_8way)
* %rdx: src
* %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
*/
+ FRAME_BEGIN

movq %rsi, %r11;

@@ -468,5 +480,6 @@ ENTRY(cast6_xts_dec_8way)
/* dst <= regs xor IVs(in dst) */
store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

+ FRAME_END
ret;
ENDPROC(cast6_xts_dec_8way)
diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
index 5d1e007..eed55c8 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
@@ -18,6 +18,7 @@

#include <linux/linkage.h>
#include <asm/inst.h>
+#include <asm/frame.h>

.data

@@ -94,6 +95,7 @@ ENDPROC(__clmul_gf128mul_ble)

/* void clmul_ghash_mul(char *dst, const u128 *shash) */
ENTRY(clmul_ghash_mul)
+ FRAME_BEGIN
movups (%rdi), DATA
movups (%rsi), SHASH
movaps .Lbswap_mask, BSWAP
@@ -101,6 +103,7 @@ ENTRY(clmul_ghash_mul)
call __clmul_gf128mul_ble
PSHUFB_XMM BSWAP DATA
movups DATA, (%rdi)
+ FRAME_END
ret
ENDPROC(clmul_ghash_mul)

@@ -109,6 +112,7 @@ ENDPROC(clmul_ghash_mul)
* const u128 *shash);
*/
ENTRY(clmul_ghash_update)
+ FRAME_BEGIN
cmp $16, %rdx
jb .Lupdate_just_ret # check length
movaps .Lbswap_mask, BSWAP
@@ -128,5 +132,6 @@ ENTRY(clmul_ghash_update)
PSHUFB_XMM BSWAP DATA
movups DATA, (%rdi)
.Lupdate_just_ret:
+ FRAME_END
ret
ENDPROC(clmul_ghash_update)
diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
index 2f202f4..8be5718 100644
--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
@@ -24,6 +24,7 @@
*/

#include <linux/linkage.h>
+#include <asm/frame.h>
#include "glue_helper-asm-avx.S"

.file "serpent-avx-x86_64-asm_64.S"
@@ -681,6 +682,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
* %rsi: dst
* %rdx: src
*/
+ FRAME_BEGIN

load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

@@ -688,6 +690,7 @@ ENTRY(serpent_ecb_enc_8way_avx)

store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

+ FRAME_END
ret;
ENDPROC(serpent_ecb_enc_8way_avx)

@@ -697,6 +700,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
* %rsi: dst
* %rdx: src
*/
+ FRAME_BEGIN

load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)

store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);

+ FRAME_END
ret;
ENDPROC(serpent_ecb_dec_8way_avx)

@@ -713,6 +718,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
* %rsi: dst
* %rdx: src
*/
+ FRAME_BEGIN

load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

@@ -720,6 +726,7 @@ ENTRY(serpent_cbc_dec_8way_avx)

store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);

+ FRAME_END
ret;
ENDPROC(serpent_cbc_dec_8way_avx)

@@ -730,6 +737,7 @@ ENTRY(serpent_ctr_8way_avx)
* %rdx: src
* %rcx: iv (little endian, 128bit)
*/
+ FRAME_BEGIN

load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
RD2, RK0, RK1, RK2);
@@ -738,6 +746,7 @@ ENTRY(serpent_ctr_8way_avx)

store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

+ FRAME_END
ret;
ENDPROC(serpent_ctr_8way_avx)

@@ -748,6 +757,7 @@ ENTRY(serpent_xts_enc_8way_avx)
* %rdx: src
* %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
*/
+ FRAME_BEGIN

/* regs <= src, dst <= IVs, regs <= regs xor IVs */
load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2,
@@ -758,6 +768,7 @@ ENTRY(serpent_xts_enc_8way_avx)
/* dst <= regs xor IVs(in dst) */
store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

+ FRAME_END
ret;
ENDPROC(serpent_xts_enc_8way_avx)

@@ -768,6 +779,7 @@ ENTRY(serpent_xts_dec_8way_avx)
* %rdx: src
* %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
*/
+ FRAME_BEGIN

/* regs <= src, dst <= IVs, regs <= regs xor IVs */
load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2,
@@ -778,5 +790,6 @@ ENTRY(serpent_xts_dec_8way_avx)
/* dst <= regs xor IVs(in dst) */
store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);

+ FRAME_END
ret;
ENDPROC(serpent_xts_dec_8way_avx)
diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
index b222085..97c48ad 100644
--- a/arch/x86/crypto/serpent-avx2-asm_64.S
+++ b/arch/x86/crypto/serpent-avx2-asm_64.S
@@ -15,6 +15,7 @@
*/

#include <linux/linkage.h>
+#include <asm/frame.h>
#include "glue_helper-asm-avx2.S"

.file "serpent-avx2-asm_64.S"
@@ -673,6 +674,7 @@ ENTRY(serpent_ecb_enc_16way)
* %rsi: dst
* %rdx: src
*/
+ FRAME_BEGIN

vzeroupper;

@@ -684,6 +686,7 @@ ENTRY(serpent_ecb_enc_16way)

vzeroupper;

+ FRAME_END
ret;
ENDPROC(serpent_ecb_enc_16way)

@@ -693,6 +696,7 @@ ENTRY(serpent_ecb_dec_16way)
* %rsi: dst
* %rdx: src
*/
+ FRAME_BEGIN

vzeroupper;

@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way)

vzeroupper;

+ FRAME_END
ret;
ENDPROC(serpent_ecb_dec_16way)

@@ -713,6 +718,7 @@ ENTRY(serpent_cbc_dec_16way)
* %rsi: dst
* %rdx: src
*/
+ FRAME_BEGIN

vzeroupper;

@@ -725,6 +731,7 @@ ENTRY(serpent_cbc_dec_16way)

vzeroupper;

+ FRAME_END
ret;
ENDPROC(serpent_cbc_dec_16way)

@@ -735,6 +742,7 @@ ENTRY(serpent_ctr_16way)
* %rdx: src (16 blocks)
* %rcx: iv (little endian, 128bit)
*/
+ FRAME_BEGIN

vzeroupper;

@@ -748,6 +756,7 @@ ENTRY(serpent_ctr_16way)

vzeroupper;

+ FRAME_END
ret;
ENDPROC(serpent_ctr_16way)

@@ -758,6 +767,7 @@ ENTRY(serpent_xts_enc_16way)
* %rdx: src (16 blocks)
* %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
*/
+ FRAME_BEGIN

vzeroupper;

@@ -772,6 +782,7 @@ ENTRY(serpent_xts_enc_16way)

vzeroupper;

+ FRAME_END
ret;
ENDPROC(serpent_xts_enc_16way)

@@ -782,6 +793,7 @@ ENTRY(serpent_xts_dec_16way)
* %rdx: src (16 blocks)
* %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
*/
+ FRAME_BEGIN

vzeroupper;

@@ -796,5 +808,6 @@ ENTRY(serpent_xts_dec_16way)

vzeroupper;

+ FRAME_END
ret;
ENDPROC(serpent_xts_dec_16way)
diff --git a/arch/x86/crypto/sha-mb/sha1_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha-mb/sha1_mb_mgr_flush_avx2.S
index 672eaeb..96df6a3 100644
--- a/arch/x86/crypto/sha-mb/sha1_mb_mgr_flush_avx2.S
+++ b/arch/x86/crypto/sha-mb/sha1_mb_mgr_flush_avx2.S
@@ -52,6 +52,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/linkage.h>
+#include <asm/frame.h>
#include "sha1_mb_mgr_datastruct.S"


@@ -103,6 +104,7 @@ offset = \_offset
# JOB* sha1_mb_mgr_flush_avx2(MB_MGR *state)
# arg 1 : rcx : state
ENTRY(sha1_mb_mgr_flush_avx2)
+ FRAME_BEGIN
push %rbx

# If bit (32+3) is set, then all lanes are empty
@@ -212,6 +214,7 @@ len_is_0:

return:
pop %rbx
+ FRAME_END
ret

return_null:
diff --git a/arch/x86/crypto/sha-mb/sha1_mb_mgr_submit_avx2.S b/arch/x86/crypto/sha-mb/sha1_mb_mgr_submit_avx2.S
index c3b9447..1435acf 100644
--- a/arch/x86/crypto/sha-mb/sha1_mb_mgr_submit_avx2.S
+++ b/arch/x86/crypto/sha-mb/sha1_mb_mgr_submit_avx2.S
@@ -53,6 +53,7 @@
*/

#include <linux/linkage.h>
+#include <asm/frame.h>
#include "sha1_mb_mgr_datastruct.S"


@@ -98,6 +99,7 @@ lane_data = %r10
# arg 1 : rcx : state
# arg 2 : rdx : job
ENTRY(sha1_mb_mgr_submit_avx2)
+ FRAME_BEGIN
push %rbx
push %r12

@@ -192,6 +194,7 @@ len_is_0:
return:
pop %r12
pop %rbx
+ FRAME_END
ret

return_null:
diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
index 0505813..dc66273 100644
--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
@@ -24,6 +24,7 @@
*/

#include <linux/linkage.h>
+#include <asm/frame.h>
#include "glue_helper-asm-avx.S"

.file "twofish-avx-x86_64-asm_64.S"
@@ -333,6 +334,7 @@ ENTRY(twofish_ecb_enc_8way)
* %rsi: dst
* %rdx: src
*/
+ FRAME_BEGIN

movq %rsi, %r11;

@@ -342,6 +344,7 @@ ENTRY(twofish_ecb_enc_8way)

store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);

+ FRAME_END
ret;
ENDPROC(twofish_ecb_enc_8way)

@@ -351,6 +354,7 @@ ENTRY(twofish_ecb_dec_8way)
* %rsi: dst
* %rdx: src
*/
+ FRAME_BEGIN

movq %rsi, %r11;

@@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)

store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

+ FRAME_END
ret;
ENDPROC(twofish_ecb_dec_8way)

@@ -369,6 +374,7 @@ ENTRY(twofish_cbc_dec_8way)
* %rsi: dst
* %rdx: src
*/
+ FRAME_BEGIN

pushq %r12;

@@ -383,6 +389,7 @@ ENTRY(twofish_cbc_dec_8way)

popq %r12;

+ FRAME_END
ret;
ENDPROC(twofish_cbc_dec_8way)

@@ -393,6 +400,7 @@ ENTRY(twofish_ctr_8way)
* %rdx: src
* %rcx: iv (little endian, 128bit)
*/
+ FRAME_BEGIN

pushq %r12;

@@ -408,6 +416,7 @@ ENTRY(twofish_ctr_8way)

popq %r12;

+ FRAME_END
ret;
ENDPROC(twofish_ctr_8way)

@@ -418,6 +427,7 @@ ENTRY(twofish_xts_enc_8way)
* %rdx: src
* %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
*/
+ FRAME_BEGIN

movq %rsi, %r11;

@@ -430,6 +440,7 @@ ENTRY(twofish_xts_enc_8way)
/* dst <= regs xor IVs(in dst) */
store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);

+ FRAME_END
ret;
ENDPROC(twofish_xts_enc_8way)

@@ -440,6 +451,7 @@ ENTRY(twofish_xts_dec_8way)
* %rdx: src
* %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
*/
+ FRAME_BEGIN

movq %rsi, %r11;

@@ -452,5 +464,6 @@ ENTRY(twofish_xts_dec_8way)
/* dst <= regs xor IVs(in dst) */
store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

+ FRAME_END
ret;
ENDPROC(twofish_xts_dec_8way)
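
For the 64-bit functions the change is purely mechanical: arguments
arrive in registers, so no stack offsets move and each function is simply
bracketed by the FRAME_BEGIN/FRAME_END pair, as in this minimal sketch
(frame_demo_64 is a hypothetical name, not part of the patch):

    #include <linux/linkage.h>
    #include <asm/frame.h>

    # hypothetical 64-bit function: args are in %rdi/%rsi, so only the
    # frame bracketing is needed -- no FRAME_OFFSET adjustments
    ENTRY(frame_demo_64)
        FRAME_BEGIN                     # push %rbp; mov %rsp, %rbp (if enabled)
        movq (%rdi), %rax               # read through the first argument
        movq %rax, (%rsi)               # write through the second
        FRAME_END                       # pop %rbp
        ret
    ENDPROC(frame_demo_64)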