; RUN: llc -O3 -disable-peephole -mcpu=corei7-avx -mattr=+avx < %s | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-unknown"
; Stack reload folding tests - we use the 'big vectors' pattern to guarantee spilling to stack.
;
; Many of these tests are primarily to check memory folding with specific instructions. Using a basic
; load/cvt/store pattern to test for this would mean that it wouldn't be the memory folding code that's
; being tested - the load-execute version of the instruction from the tables would be matched instead.
; Check that the vmulpd operand spilled across the add/sub pressure is folded
; directly from its 32-byte stack slot (ymm reload folded into vmulpd).
define void @stack_fold_vmulpd(<64 x double>* %a, <64 x double>* %b, <64 x double>* %c) {
  ;CHECK-LABEL: stack_fold_vmulpd
  ;CHECK: vmulpd {{[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
  %1 = load <64 x double>* %a
  %2 = load <64 x double>* %b
  %3 = fadd <64 x double> %1, %2
  %4 = fsub <64 x double> %1, %2
  %5 = fmul <64 x double> %3, %4
  store <64 x double> %5, <64 x double>* %c
  ret void
}
; Check that a spilled cvtdq2ps source is folded as a 32-byte reload
; (vcvtdq2ps with a stack memory operand).
define void @stack_fold_cvtdq2ps(<128 x i32>* %a, <128 x i32>* %b, <128 x float>* %c) {
  ;CHECK-LABEL: stack_fold_cvtdq2ps
  ;CHECK: vcvtdq2ps {{[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
  %1 = load <128 x i32>* %a
  %2 = load <128 x i32>* %b
  %3 = and <128 x i32> %1, %2
  %4 = xor <128 x i32> %1, %2
  %5 = sitofp <128 x i32> %3 to <128 x float>
  %6 = sitofp <128 x i32> %4 to <128 x float>
  %7 = fadd <128 x float> %5, %6
  store <128 x float> %7, <128 x float>* %c
  ret void
}
; Check that a spilled cvttpd2dq source (256-bit 'y' form narrowing to xmm)
; is folded as a 32-byte reload.
define void @stack_fold_cvttpd2dq(<64 x double>* %a, <64 x double>* %b, <64 x i32>* %c) #0 {
  ;CHECK-LABEL: stack_fold_cvttpd2dq
  ;CHECK: vcvttpd2dqy {{[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
  %1 = load <64 x double>* %a
  %2 = load <64 x double>* %b
  %3 = fadd <64 x double> %1, %2
  %4 = fsub <64 x double> %1, %2
  %5 = fptosi <64 x double> %3 to <64 x i32>
  %6 = fptosi <64 x double> %4 to <64 x i32>
  %7 = or <64 x i32> %5, %6
  store <64 x i32> %7, <64 x i32>* %c
  ret void
}
; Check that a spilled cvttps2dq source is folded as a 32-byte reload
; (vcvttps2dq with a stack memory operand).
define void @stack_fold_cvttps2dq(<128 x float>* %a, <128 x float>* %b, <128 x i32>* %c) #0 {
  ;CHECK-LABEL: stack_fold_cvttps2dq
  ;CHECK: vcvttps2dq {{[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
  %1 = load <128 x float>* %a
  %2 = load <128 x float>* %b
  %3 = fadd <128 x float> %1, %2
  %4 = fsub <128 x float> %1, %2
  %5 = fptosi <128 x float> %3 to <128 x i32>
  %6 = fptosi <128 x float> %4 to <128 x i32>
  %7 = or <128 x i32> %5, %6
  store <128 x i32> %7, <128 x i32>* %c
  ret void
}