; CHECK-NEXT: paddd (%{{.*}}), %[[R0]]
; CHECK-NEXT: pextrd $2, %[[R0]], 8(%{{.*}})
; CHECK-NEXT: movq %[[R0]], (%{{.*}})
- %a = load %i32vec3* %ap, align 16
- %b = load %i32vec3* %bp, align 16
+ %a = load %i32vec3, %i32vec3* %ap, align 16
+ %b = load %i32vec3, %i32vec3* %bp, align 16
%x = add %i32vec3 %a, %b
store %i32vec3 %x, %i32vec3* %ret, align 16
ret void
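; A brief note on the hunk above (the function name sits outside this excerpt;
; presumably an add3i32-style test): <3 x i32> is not a legal SSE type, so the
; add is widened to <4 x i32> and done with a single paddd, and the 12-byte
; result is stored as a movq of the low 8 bytes plus a pextrd of lane 2. The IR
; change is purely syntactic; the load now names the loaded type before the
; pointer operand:
;   %a = load %i32vec3, %i32vec3* %ap, align 16   ; explicit-type form
; rather than the older `load %i32vec3* %ap`; codegen is expected to be unchanged.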
; CHECK-NEXT: paddd %[[R0]], %[[R1]]
; CHECK-NEXT: pextrd $2, %[[R1]], 8(%{{.*}})
; CHECK-NEXT: movq %[[R1]], (%{{.*}})
- %a = load %i32vec3* %ap, align 8
- %b = load %i32vec3* %bp, align 8
+ %a = load %i32vec3, %i32vec3* %ap, align 8
+ %b = load %i32vec3, %i32vec3* %bp, align 8
%x = add %i32vec3 %a, %b
store %i32vec3 %x, %i32vec3* %ret, align 8
ret void
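; Same <3 x i32> add with only 8-byte alignment: a 16-byte movdqa load is no
; longer guaranteed safe, so the operands are presumably assembled from
; narrower loads (outside this excerpt) and the paddd takes two registers
; rather than a memory operand.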
; CHECK-NEXT: pextrd $2, %[[R1]], 24(%{{.*}})
; CHECK-NEXT: movq %[[R1]], 16(%{{.*}})
; CHECK-NEXT: movdqa %[[R0]], (%{{.*}})
- %a = load %i32vec7* %ap, align 16
- %b = load %i32vec7* %bp, align 16
+ %a = load %i32vec7, %i32vec7* %ap, align 16
+ %b = load %i32vec7, %i32vec7* %bp, align 16
%x = add %i32vec7 %a, %b
store %i32vec7 %x, %i32vec7* %ret, align 16
ret void
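; <7 x i32> (28 bytes) widens into two xmm registers, and the store decomposes
; as 16 + 8 + 4 bytes: movdqa for lanes 0-3, movq for lanes 4-5, and pextrd $2
; of the high register for element 6 at offset 24.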
; CHECK-NEXT: movdqa %[[R2]], 32(%{{.*}})
; CHECK-NEXT: movdqa %[[R1]], 16(%{{.*}})
; CHECK-NEXT: movdqa %[[R0]], (%{{.*}})
- %a = load %i32vec12* %ap, align 16
- %b = load %i32vec12* %bp, align 16
+ %a = load %i32vec12, %i32vec12* %ap, align 16
+ %b = load %i32vec12, %i32vec12* %bp, align 16
%x = add %i32vec12 %a, %b
store %i32vec12 %x, %i32vec12* %ret, align 16
ret void
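; <12 x i32> is exactly three <4 x i32> registers, so after widening the whole
; operation is legal and the result is stored with three plain movdqa stores.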
; CHECK: pmovzxwd (%{{.*}}), %[[R0:xmm[0-9]+]]
; CHECK-NEXT: pmovzxwd (%{{.*}}), %[[R1:xmm[0-9]+]]
; CHECK-NEXT: paddd %[[R0]], %[[R1]]
-; CHECK-NEXT: movdqa %[[R1]], %[[R0]]
-; CHECK-NEXT: pshufb {{.*}}, %[[R0]]
-; CHECK-NEXT: pmovzxdq %[[R0]], %[[R0]]
; CHECK-NEXT: pextrw $4, %[[R1]], 4(%{{.*}})
+; CHECK-NEXT: pshufb {{.*}}, %[[R1]]
+; CHECK-NEXT: pmovzxdq %[[R1]], %[[R0]]
; CHECK-NEXT: movd %[[R0]], (%{{.*}})
- %a = load %i16vec3* %ap, align 16
- %b = load %i16vec3* %bp, align 16
+ %a = load %i16vec3, %i16vec3* %ap, align 16
+ %b = load %i16vec3, %i16vec3* %bp, align 16
%x = add %i16vec3 %a, %b
store %i16vec3 %x, %i16vec3* %ret, align 16
ret void
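; For <3 x i16> the elements are zero-extended to dword lanes (pmovzxwd), added
; with paddd, and truncated on the way out: pextrw $4 stores the third element
; at offset 4, then pshufb/pmovzxdq pack lanes 0-1 so a movd can store the
; first two. The reordered checks reflect that the extract now happens before a
; pshufb performed in place on %[[R1]], removing the movdqa register copy the
; old sequence needed.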
; CHECK-NEXT: movq (%{{.*}}), %[[R1:xmm[0-9]+]]
; CHECK-NEXT: paddw %[[R0]], %[[R1]]
; CHECK-NEXT: movq %[[R1]], (%{{.*}})
- %a = load %i16vec4* %ap, align 16
- %b = load %i16vec4* %bp, align 16
+ %a = load %i16vec4, %i16vec4* %ap, align 16
+ %b = load %i16vec4, %i16vec4* %bp, align 16
%x = add %i16vec4 %a, %b
store %i16vec4 %x, %i16vec4* %ret, align 16
ret void
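; <4 x i16> is exactly 8 bytes, so a movq load, paddw, movq store round-trip is
; enough; widening leaves no residue to clean up here.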
; CHECK-NEXT: paddw 16(%{{.*}}), %[[R1]]
; CHECK-NEXT: movq %[[R1]], 16(%{{.*}})
; CHECK-NEXT: movdqa %[[R0]], (%{{.*}})
- %a = load %i16vec12* %ap, align 16
- %b = load %i16vec12* %bp, align 16
+ %a = load %i16vec12, %i16vec12* %ap, align 16
+ %b = load %i16vec12, %i16vec12* %bp, align 16
%x = add %i16vec12 %a, %b
store %i16vec12 %x, %i16vec12* %ret, align 16
ret void
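; <12 x i16> (24 bytes): the low half's load and add sit outside this excerpt;
; shown here are the upper-half paddw with a 16(%...) memory operand, the movq
; store of those 8 bytes at offset 16, and the movdqa store of the low 16 bytes.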
; CHECK-NEXT: movd %[[R2]], 32(%{{.*}})
; CHECK-NEXT: movdqa %[[R1]], 16(%{{.*}})
; CHECK-NEXT: movdqa %[[R0]], (%{{.*}})
- %a = load %i16vec18* %ap, align 16
- %b = load %i16vec18* %bp, align 16
+ %a = load %i16vec18, %i16vec18* %ap, align 16
+ %b = load %i16vec18, %i16vec18* %bp, align 16
%x = add %i16vec18 %a, %b
store %i16vec18 %x, %i16vec18* %ret, align 16
ret void
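; <18 x i16> is 36 bytes: two full movdqa stores plus a movd for the final four
; bytes (elements 16-17) at offset 32.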
; CHECK: pmovzxbd (%{{.*}}), %[[R0:xmm[0-9]+]]
; CHECK-NEXT: pmovzxbd (%{{.*}}), %[[R1:xmm[0-9]+]]
; CHECK-NEXT: paddd %[[R0]], %[[R1]]
-; CHECK-NEXT: movdqa %[[R1]], %[[R0]]
-; CHECK-NEXT: pshufb {{.*}}, %[[R0]]
-; CHECK-NEXT: pmovzxwq %[[R0]], %[[R0]]
; CHECK-NEXT: pextrb $8, %[[R1]], 2(%{{.*}})
+; CHECK-NEXT: pshufb {{.*}}, %[[R1]]
+; CHECK-NEXT: pmovzxwq %[[R1]], %[[R0]]
; CHECK-NEXT: movd %[[R0]], %e[[R2:[abcd]]]x
; CHECK-NEXT: movw %[[R2]]x, (%{{.*}})
- %a = load %i8vec3* %ap, align 16
- %b = load %i8vec3* %bp, align 16
+ %a = load %i8vec3, %i8vec3* %ap, align 16
+ %b = load %i8vec3, %i8vec3* %bp, align 16
%x = add %i8vec3 %a, %b
store %i8vec3 %x, %i8vec3* %ret, align 16
ret void
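; <3 x i8> mirrors the <3 x i16> case one level down: pmovzxbd widens the bytes
; to dword lanes for the paddd, pextrb $8 stores the third byte at offset 2,
; and pshufb/pmovzxwq pack the low two bytes so they can travel through a GPR
; to a two-byte movw store. As in the <3 x i16> case above, the pextrb now
; precedes an in-place pshufb instead of following a movdqa copy.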
; CHECK-NEXT: pextrd $2, %[[R1]], 24(%{{.*}})
; CHECK-NEXT: movq %[[R1]], 16(%{{.*}})
; CHECK-NEXT: movdqa %[[R0]], (%{{.*}})
- %a = load %i8vec31* %ap, align 16
- %b = load %i8vec31* %bp, align 16
+ %a = load %i8vec31, %i8vec31* %ap, align 16
+ %b = load %i8vec31, %i8vec31* %bp, align 16
%x = add %i8vec31 %a, %b
store %i8vec31 %x, %i8vec31* %ret, align 16
ret void
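; <31 x i8> stores 16 + 8 + 4 bytes via the movdqa, movq, and pextrd shown; the
; remaining three bytes are presumably covered by check lines outside this
; excerpt (a pextrw and a pextrb, or similar).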
; CHECK-NEXT: movw %[[R1]]x, (%[[PTR1:.*]])
; CHECK-NEXT: movb $1, 2(%[[PTR1]])
; CHECK-NEXT: pmovzxbd (%[[PTR0]]), %[[X0:xmm[0-9]+]]
-; CHECK-NEXT: pand {{.*}}, %[[X0]]
-; CHECK-NEXT: pextrd $1, %[[X0]], %e[[R0:[abcd]]]x
-; CHECK-NEXT: shrl %e[[R0]]x
-; CHECK-NEXT: movd %[[X0]], %e[[R1:[abcd]]]x
-; CHECK-NEXT: shrl %e[[R1]]x
-; CHECK-NEXT: movd %e[[R1]]x, %[[X1:xmm[0-9]+]]
-; CHECK-NEXT: pinsrd $1, %e[[R0]]x, %[[X1]]
-; CHECK-NEXT: pextrd $2, %[[X0]], %e[[R0:[abcd]]]x
-; CHECK-NEXT: shrl %e[[R0]]x
-; CHECK-NEXT: pinsrd $2, %e[[R0]]x, %[[X1]]
-; CHECK-NEXT: pextrd $3, %[[X0]], %e[[R0:[abcd]]]x
-; CHECK-NEXT: pinsrd $3, %e[[R0]]x, %[[X1]]
-; CHECK-NEXT: movdqa %[[X1]], %[[X2:xmm[0-9]+]]
-; CHECK-NEXT: pshufb %[[SHUFFLE_MASK]], %[[X2]]
-; CHECK-NEXT: pmovzxwq %[[X2]], %[[X3:xmm[0-9]+]]
+; CHECK-NEXT: movdqa %[[X0]], %[[X1:xmm[0-9]+]]
+; CHECK-NEXT: psrld $1, %[[X1]]
+; CHECK-NEXT: pblendw $192, %[[X0]], %[[X1]]
; CHECK-NEXT: pextrb $8, %[[X1]], 2(%{{.*}})
+; CHECK-NEXT: pshufb %[[SHUFFLE_MASK]], %[[X1]]
+; CHECK-NEXT: pmovzxwq %[[X1]], %[[X3:xmm[0-9]+]]
; CHECK-NEXT: movd %[[X3]], %e[[R0:[abcd]]]x
; CHECK-NEXT: movw %[[R0]]x, (%{{.*}})
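; The rewritten checks above replace the old scalarized lshr (pextrd each
; dword, shrl, pinsrd it back) with a vector shift: psrld $1 shifts every lane,
; and pblendw $192 merges the top dword back in unshifted (presumably the
; padding lane, whose shift amount becomes zero after widening). The tail then
; stores the <3 x i8> result as before: pextrb for the third byte, and
; pshufb/pmovzxwq/movd/movw for the first two.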
store <3 x i8> <i8 -98, i8 -98, i8 -98>, <3 x i8>* %storetmp
%storetmp1 = bitcast %i8vec3pack* %rot to <3 x i8>*
store <3 x i8> <i8 1, i8 1, i8 1>, <3 x i8>* %storetmp1
- %tmp = load %i8vec3pack* %X
+ %tmp = load %i8vec3pack, %i8vec3pack* %X
%extractVec = extractvalue %i8vec3pack %tmp, 0
- %tmp2 = load %i8vec3pack* %rot
+ %tmp2 = load %i8vec3pack, %i8vec3pack* %rot
%extractVec3 = extractvalue %i8vec3pack %tmp2, 0
%shr = lshr <3 x i8> %extractVec, %extractVec3
%storetmp4 = bitcast %i8vec3pack* %result to <3 x i8>*