; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 < %s | FileCheck %s
; Verify that the DAGCombiner is able to fold a vector AND into a blend
; if one of the operands to the AND is a vector of all constants, and each
; constant element is either zero or all-ones.
; Mask <-1,0,0,0>: keep lane 0 of %A, zero lanes 1-3 -> blend with a zero vector.
define <4 x i32> @test1(<4 x i32> %A) {
; CHECK-LABEL: test1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pxor %xmm1, %xmm1
; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; CHECK-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 -1, i32 0, i32 0, i32 0>
  ret <4 x i32> %1
}
; Mask <0,-1,0,0>: keep lane 1 of %A, zero the other lanes.
define <4 x i32> @test2(<4 x i32> %A) {
; CHECK-LABEL: test2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pxor %xmm1, %xmm1
; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
; CHECK-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 0, i32 -1, i32 0, i32 0>
  ret <4 x i32> %1
}
; Mask <0,0,-1,0>: keep lane 2 of %A, zero the other lanes.
define <4 x i32> @test3(<4 x i32> %A) {
; CHECK-LABEL: test3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pxor %xmm1, %xmm1
; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5],xmm1[6,7]
; CHECK-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 0, i32 0, i32 -1, i32 0>
  ret <4 x i32> %1
}
; Mask <0,0,0,-1>: keep lane 3 of %A, zero the other lanes.
define <4 x i32> @test4(<4 x i32> %A) {
; CHECK-LABEL: test4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pxor %xmm1, %xmm1
; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
; CHECK-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 0, i32 0, i32 0, i32 -1>
  ret <4 x i32> %1
}
; Mask <-1,0,-1,0>: keep the even lanes of %A, zero the odd lanes.
define <4 x i32> @test5(<4 x i32> %A) {
; CHECK-LABEL: test5:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pxor %xmm1, %xmm1
; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; CHECK-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 -1, i32 0, i32 -1, i32 0>
  ret <4 x i32> %1
}
; Mask <0,-1,0,-1>: keep the odd lanes of %A, zero the even lanes.
define <4 x i32> @test6(<4 x i32> %A) {
; CHECK-LABEL: test6:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pxor %xmm1, %xmm1
; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
; CHECK-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 0, i32 -1, i32 0, i32 -1>
  ret <4 x i32> %1
}
; Mask <0,0,-1,-1>: keep the high half of %A, zero the low half.
define <4 x i32> @test7(<4 x i32> %A) {
; CHECK-LABEL: test7:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pxor %xmm1, %xmm1
; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; CHECK-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 0, i32 0, i32 -1, i32 -1>
  ret <4 x i32> %1
}
; Mask <-1,0,0,-1>: keep the outer lanes of %A, zero the middle lanes.
define <4 x i32> @test8(<4 x i32> %A) {
; CHECK-LABEL: test8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pxor %xmm1, %xmm1
; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5],xmm0[6,7]
; CHECK-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 -1, i32 0, i32 0, i32 -1>
  ret <4 x i32> %1
}
; Mask <-1,-1,0,0>: low 64 bits kept, high 64 bits zeroed. This folds to a
; zero-extending movq rather than a blend.
define <4 x i32> @test9(<4 x i32> %A) {
; CHECK-LABEL: test9:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
; CHECK-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 -1, i32 -1, i32 0, i32 0>
  ret <4 x i32> %1
}
; Mask <0,-1,-1,0>: keep the middle lanes of %A, zero the outer lanes.
define <4 x i32> @test10(<4 x i32> %A) {
; CHECK-LABEL: test10:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pxor %xmm1, %xmm1
; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5],xmm1[6,7]
; CHECK-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 0, i32 -1, i32 -1, i32 0>
  ret <4 x i32> %1
}
; Mask <0,-1,-1,-1>: zero only lane 0 of %A.
define <4 x i32> @test11(<4 x i32> %A) {
; CHECK-LABEL: test11:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pxor %xmm1, %xmm1
; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
; CHECK-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 0, i32 -1, i32 -1, i32 -1>
  ret <4 x i32> %1
}
; Mask <-1,-1,-1,0>: zero only lane 3 of %A.
define <4 x i32> @test12(<4 x i32> %A) {
; CHECK-LABEL: test12:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pxor %xmm1, %xmm1
; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
; CHECK-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 -1, i32 -1, i32 -1, i32 0>
  ret <4 x i32> %1
}
; Mask <-1,-1,0,-1>: zero only lane 2 of %A.
define <4 x i32> @test13(<4 x i32> %A) {
; CHECK-LABEL: test13:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pxor %xmm1, %xmm1
; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
; CHECK-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 -1, i32 -1, i32 0, i32 -1>
  ret <4 x i32> %1
}
; Mask <-1,0,-1,-1>: zero only lane 1 of %A.
define <4 x i32> @test14(<4 x i32> %A) {
; CHECK-LABEL: test14:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pxor %xmm1, %xmm1
; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
; CHECK-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 -1, i32 0, i32 -1, i32 -1>
  ret <4 x i32> %1
}
; (and %A, C1) | (and %B, C2) where C1 and C2 are complementary lane masks
; should fold to a single blend of %A and %B (lane 1 taken from %B).
define <4 x i32> @test15(<4 x i32> %A, <4 x i32> %B) {
; CHECK-LABEL: test15:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
; CHECK-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 -1, i32 0, i32 -1, i32 -1>
  %2 = and <4 x i32> %B, <i32 0, i32 -1, i32 0, i32 0>
  %3 = or <4 x i32> %1, %2
  ret <4 x i32> %3
}
; Complementary even/odd lane masks: fold to a single blend taking the even
; lanes from %A and the odd lanes from %B.
define <4 x i32> @test16(<4 x i32> %A, <4 x i32> %B) {
; CHECK-LABEL: test16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; CHECK-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 -1, i32 0, i32 -1, i32 0>
  %2 = and <4 x i32> %B, <i32 0, i32 -1, i32 0, i32 -1>
  %3 = or <4 x i32> %1, %2
  ret <4 x i32> %3
}
; Same as test16 with the masks swapped: odd lanes from %A, even lanes from %B,
; still a single blend.
define <4 x i32> @test17(<4 x i32> %A, <4 x i32> %B) {
; CHECK-LABEL: test17:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
; CHECK-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 0, i32 -1, i32 0, i32 -1>
  %2 = and <4 x i32> %B, <i32 -1, i32 0, i32 -1, i32 0>
  %3 = or <4 x i32> %1, %2
  ret <4 x i32> %3
}