From 54db4092c1098cfde4692279fbe923084ecb63ee Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Sat, 14 Mar 2015 23:16:43 +0000
Subject: [PATCH] Simplified some stack folding tests.

Replaced explicit pmovzx* intrinsic tests with general shuffles

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@232286 91177308-0d34-0410-b5e6-96231b3b80d8
---
 test/CodeGen/X86/stack-folding-int-avx1.ll  | 36 ++++++++++-----------
 test/CodeGen/X86/stack-folding-int-sse42.ll | 36 ++++++++++-----------
 2 files changed, 36 insertions(+), 36 deletions(-)

diff --git a/test/CodeGen/X86/stack-folding-int-avx1.ll b/test/CodeGen/X86/stack-folding-int-avx1.ll
index 23874935d25..a9a21c2f20a 100644
--- a/test/CodeGen/X86/stack-folding-int-avx1.ll
+++ b/test/CodeGen/X86/stack-folding-int-avx1.ll
@@ -721,55 +721,55 @@ define <4 x i32> @stack_fold_pmovzxbd(<16 x i8> %a0) {
   ;CHECK-LABEL: stack_fold_pmovzxbd
   ;CHECK: vpmovzxbd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
-  %2 = call <4 x i32> @llvm.x86.sse41.pmovzxbd(<16 x i8> %a0)
-  ret <4 x i32> %2
+  %2 = shufflevector <16 x i8> %a0, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 16, i32 17, i32 18, i32 1, i32 19, i32 20, i32 21, i32 2, i32 22, i32 23, i32 24, i32 3, i32 25, i32 26, i32 27>
+  %3 = bitcast <16 x i8> %2 to <4 x i32>
+  ret <4 x i32> %3
 }
-declare <4 x i32> @llvm.x86.sse41.pmovzxbd(<16 x i8>) nounwind readnone
 
 define <2 x i64> @stack_fold_pmovzxbq(<16 x i8> %a0) {
   ;CHECK-LABEL: stack_fold_pmovzxbq
   ;CHECK: vpmovzxbq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
-  %2 = call <2 x i64> @llvm.x86.sse41.pmovzxbq(<16 x i8> %a0)
-  ret <2 x i64> %2
+  %2 = shufflevector <16 x i8> %a0, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 1, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29>
+  %3 = bitcast <16 x i8> %2 to <2 x i64>
+  ret <2 x i64> %3
 }
-declare <2 x i64> @llvm.x86.sse41.pmovzxbq(<16 x i8>) nounwind readnone
 
 define <8 x i16> @stack_fold_pmovzxbw(<16 x i8> %a0) {
   ;CHECK-LABEL: stack_fold_pmovzxbw
   ;CHECK: vpmovzxbw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
-  %2 = call <8 x i16> @llvm.x86.sse41.pmovzxbw(<16 x i8> %a0)
-  ret <8 x i16> %2
+  %2 = shufflevector <16 x i8> %a0, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
+  %3 = bitcast <16 x i8> %2 to <8 x i16>
+  ret <8 x i16> %3
 }
-declare <8 x i16> @llvm.x86.sse41.pmovzxbw(<16 x i8>) nounwind readnone
 
 define <2 x i64> @stack_fold_pmovzxdq(<4 x i32> %a0) {
   ;CHECK-LABEL: stack_fold_pmovzxdq
   ;CHECK: vpmovzxdq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
-  %2 = call <2 x i64> @llvm.x86.sse41.pmovzxdq(<4 x i32> %a0)
-  ret <2 x i64> %2
+  %2 = shufflevector <4 x i32> %a0, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+  %3 = bitcast <4 x i32> %2 to <2 x i64>
+  ret <2 x i64> %3
 }
-declare <2 x i64> @llvm.x86.sse41.pmovzxdq(<4 x i32>) nounwind readnone
 
 define <4 x i32> @stack_fold_pmovzxwd(<8 x i16> %a0) {
   ;CHECK-LABEL: stack_fold_pmovzxwd
   ;CHECK: vpmovzxwd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
-  %2 = call <4 x i32> @llvm.x86.sse41.pmovzxwd(<8 x i16> %a0)
-  ret <4 x i32> %2
+  %2 = shufflevector <8 x i16> %a0, <8 x i16> zeroinitializer, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+  %3 = bitcast <8 x i16> %2 to <4 x i32>
+  ret <4 x i32> %3
 }
-declare <4 x i32> @llvm.x86.sse41.pmovzxwd(<8 x i16>) nounwind readnone
 
 define <2 x i64> @stack_fold_pmovzxwq(<8 x i16> %a0) {
   ;CHECK-LABEL: stack_fold_pmovzxwq
   ;CHECK: vpmovzxwq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
-  %2 = call <2 x i64> @llvm.x86.sse41.pmovzxwq(<8 x i16> %a0)
-  ret <2 x i64> %2
+  %2 = shufflevector <8 x i16> %a0, <8 x i16> zeroinitializer, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 10, i32 11, i32 12, i32 13>
+  %3 = bitcast <8 x i16> %2 to <2 x i64>
+  ret <2 x i64> %3
 }
-declare <2 x i64> @llvm.x86.sse41.pmovzxwq(<8 x i16>) nounwind readnone
 
 define <2 x i64> @stack_fold_pmuldq(<4 x i32> %a0, <4 x i32> %a1) {
   ;CHECK-LABEL: stack_fold_pmuldq
diff --git a/test/CodeGen/X86/stack-folding-int-sse42.ll b/test/CodeGen/X86/stack-folding-int-sse42.ll
index 099a5db7d99..6aa26010783 100644
--- a/test/CodeGen/X86/stack-folding-int-sse42.ll
+++ b/test/CodeGen/X86/stack-folding-int-sse42.ll
@@ -721,55 +721,55 @@ define <4 x i32> @stack_fold_pmovzxbd(<16 x i8> %a0) {
   ;CHECK-LABEL: stack_fold_pmovzxbd
   ;CHECK: pmovzxbd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
-  %2 = call <4 x i32> @llvm.x86.sse41.pmovzxbd(<16 x i8> %a0)
-  ret <4 x i32> %2
+  %2 = shufflevector <16 x i8> %a0, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 16, i32 17, i32 18, i32 1, i32 19, i32 20, i32 21, i32 2, i32 22, i32 23, i32 24, i32 3, i32 25, i32 26, i32 27>
+  %3 = bitcast <16 x i8> %2 to <4 x i32>
+  ret <4 x i32> %3
 }
-declare <4 x i32> @llvm.x86.sse41.pmovzxbd(<16 x i8>) nounwind readnone
 
 define <2 x i64> @stack_fold_pmovzxbq(<16 x i8> %a0) {
   ;CHECK-LABEL: stack_fold_pmovzxbq
   ;CHECK: pmovzxbq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
-  %2 = call <2 x i64> @llvm.x86.sse41.pmovzxbq(<16 x i8> %a0)
-  ret <2 x i64> %2
+  %2 = shufflevector <16 x i8> %a0, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 1, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29>
+  %3 = bitcast <16 x i8> %2 to <2 x i64>
+  ret <2 x i64> %3
 }
-declare <2 x i64> @llvm.x86.sse41.pmovzxbq(<16 x i8>) nounwind readnone
 
 define <8 x i16> @stack_fold_pmovzxbw(<16 x i8> %a0) {
   ;CHECK-LABEL: stack_fold_pmovzxbw
   ;CHECK: pmovzxbw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
-  %2 = call <8 x i16> @llvm.x86.sse41.pmovzxbw(<16 x i8> %a0)
-  ret <8 x i16> %2
+  %2 = shufflevector <16 x i8> %a0, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
+  %3 = bitcast <16 x i8> %2 to <8 x i16>
+  ret <8 x i16> %3
 }
-declare <8 x i16> @llvm.x86.sse41.pmovzxbw(<16 x i8>) nounwind readnone
 
 define <2 x i64> @stack_fold_pmovzxdq(<4 x i32> %a0) {
   ;CHECK-LABEL: stack_fold_pmovzxdq
   ;CHECK: pmovzxdq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
-  %2 = call <2 x i64> @llvm.x86.sse41.pmovzxdq(<4 x i32> %a0)
-  ret <2 x i64> %2
+  %2 = shufflevector <4 x i32> %a0, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+  %3 = bitcast <4 x i32> %2 to <2 x i64>
+  ret <2 x i64> %3
 }
-declare <2 x i64> @llvm.x86.sse41.pmovzxdq(<4 x i32>) nounwind readnone
 
 define <4 x i32> @stack_fold_pmovzxwd(<8 x i16> %a0) {
   ;CHECK-LABEL: stack_fold_pmovzxwd
   ;CHECK: pmovzxwd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
-  %2 = call <4 x i32> @llvm.x86.sse41.pmovzxwd(<8 x i16> %a0)
-  ret <4 x i32> %2
+  %2 = shufflevector <8 x i16> %a0, <8 x i16> zeroinitializer, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+  %3 = bitcast <8 x i16> %2 to <4 x i32>
+  ret <4 x i32> %3
 }
-declare <4 x i32> @llvm.x86.sse41.pmovzxwd(<8 x i16>) nounwind readnone
 
 define <2 x i64> @stack_fold_pmovzxwq(<8 x i16> %a0) {
   ;CHECK-LABEL: stack_fold_pmovzxwq
   ;CHECK: pmovzxwq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
-  %2 = call <2 x i64> @llvm.x86.sse41.pmovzxwq(<8 x i16> %a0)
-  ret <2 x i64> %2
+  %2 = shufflevector <8 x i16> %a0, <8 x i16> zeroinitializer, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 10, i32 11, i32 12, i32 13>
+  %3 = bitcast <8 x i16> %2 to <2 x i64>
+  ret <2 x i64> %3
 }
-declare <2 x i64> @llvm.x86.sse41.pmovzxwq(<8 x i16>) nounwind readnone
 
 define <2 x i64> @stack_fold_pmuldq(<4 x i32> %a0, <4 x i32> %a1) {
   ;CHECK-LABEL: stack_fold_pmuldq
-- 
2.34.1
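
For reference, the shuffle-plus-bitcast idiom that replaces the @llvm.x86.sse41.pmovzx* intrinsics in the tests above can also be written as a standalone function. The sketch below is illustrative only (the function name is hypothetical and not part of the patch) and assumes an x86 target with SSE4.1 or AVX: the shuffle interleaves the low source elements with elements taken from a zero vector, and the bitcast reinterprets the interleaved lanes at the wider element type, which the backend can match to a single (v)pmovzxwd.

; Minimal sketch, not part of the patch: zero-extend the low 4 words of %a
; to 32 bits without using the @llvm.x86.sse41.pmovzxwd intrinsic.
define <4 x i32> @zext_low_words_example(<8 x i16> %a) {
  ; Interleave the low words of %a with words from the zero vector,
  ; producing <a0, 0, a1, 0, a2, 0, a3, 0> as an <8 x i16>.
  %shuf = shufflevector <8 x i16> %a, <8 x i16> zeroinitializer, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
  ; Reinterpret the widened lanes as <4 x i32>; on little-endian x86 this is
  ; exactly the zero-extension of the low 4 words.
  %res = bitcast <8 x i16> %shuf to <4 x i32>
  ret <4 x i32> %res
}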