target triple = "x86_64-unknown-unknown"
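; These excerpts exercise SelectionDAG lowering of the llvm.ctlz.* vector
; intrinsics for SSE2/SSE3/SSSE3/SSE4.1 (the companion files below cover AVX
; and the llvm.cttz.* tests). Adding nounwind to each test function is
; presumably what allows the tighter -NEXT checks: without it the asm printer
; interleaves .cfi_* unwind directives between the pushes and the consecutive
; matches break.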
-define <2 x i64> @testv2i64(<2 x i64> %in) {
+define <2 x i64> @testv2i64(<2 x i64> %in) nounwind {
; SSE2-LABEL: testv2i64:
; SSE2: # BB#0:
; SSE2-NEXT: movd %xmm0, %rax
; SSE2-NEXT: cmoveq %rcx, %rax
; SSE2-NEXT: xorq $63, %rax
; SSE2-NEXT: movd %rax, %xmm1
-; SSE2-NEXT: pshufd $78, %xmm0, %xmm0 # xmm0 = xmm0[2,3,0,1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE2-NEXT: movd %xmm0, %rax
; SSE2-NEXT: bsrq %rax, %rax
; SSE2-NEXT: cmoveq %rcx, %rax
; SSE2-NEXT: xorq $63, %rax
; SSE2-NEXT: movd %rax, %xmm0
-; SSE2-NEXT: punpcklqdq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE3-NEXT: cmoveq %rcx, %rax
; SSE3-NEXT: xorq $63, %rax
; SSE3-NEXT: movd %rax, %xmm1
-; SSE3-NEXT: pshufd $78, %xmm0, %xmm0 # xmm0 = xmm0[2,3,0,1]
+; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE3-NEXT: movd %xmm0, %rax
; SSE3-NEXT: bsrq %rax, %rax
; SSE3-NEXT: cmoveq %rcx, %rax
; SSE3-NEXT: xorq $63, %rax
; SSE3-NEXT: movd %rax, %xmm0
-; SSE3-NEXT: punpcklqdq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0]
+; SSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE3-NEXT: movdqa %xmm1, %xmm0
; SSE3-NEXT: retq
;
; SSSE3-NEXT: cmoveq %rcx, %rax
; SSSE3-NEXT: xorq $63, %rax
; SSSE3-NEXT: movd %rax, %xmm1
-; SSSE3-NEXT: pshufd $78, %xmm0, %xmm0 # xmm0 = xmm0[2,3,0,1]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSSE3-NEXT: movd %xmm0, %rax
; SSSE3-NEXT: bsrq %rax, %rax
; SSSE3-NEXT: cmoveq %rcx, %rax
; SSSE3-NEXT: xorq $63, %rax
; SSSE3-NEXT: movd %rax, %xmm0
-; SSSE3-NEXT: punpcklqdq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0]
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: retq
-
;
; SSE41-LABEL: testv2i64:
; SSE41: # BB#0:
ret <2 x i64> %out
}
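; Without a vector lzcnt instruction, ctlz of <2 x i64> is scalarized: movd
; extracts each half, bsrq returns the index of the highest set bit, and
; xorq $63 turns that index into 63-index, the leading-zero count. bsr leaves
; its destination undefined for a zero input, so the cmoveq substitutes a
; sentinel from %rcx (presumably 127, so the final xor still yields 64). The
; {{.*#+}} patterns introduced here are FileCheck regexes that absorb the
; printed operands up to the asm comment marker, pinning only the lane
; mapping. A minimal standalone sketch of the presumed IR body:
define <2 x i64> @ctlz_v2i64_sketch(<2 x i64> %in) nounwind {
  %out = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %in, i1 0) ; i1 0: ctlz(0) is defined (= 64)
  ret <2 x i64> %out
}
declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>, i1)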
-define <2 x i64> @testv2i64u(<2 x i64> %in) {
+define <2 x i64> @testv2i64u(<2 x i64> %in) nounwind {
; SSE2-LABEL: testv2i64u:
; SSE2: # BB#0:
; SSE2-NEXT: movd %xmm0, %rax
ret <2 x i64> %out
}
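; The "u" variants pass i1 -1 (result undefined for a zero element), which is
; why guard-free bsrl/xorl runs appear in @testv16i8u further down. Sketch of
; the presumed body, reusing the declaration above:
define <2 x i64> @ctlz_v2i64_undef_sketch(<2 x i64> %in) nounwind {
  %out = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %in, i1 -1) ; i1 -1: zero input is undef
  ret <2 x i64> %out
}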
-define <4 x i32> @testv4i32(<4 x i32> %in) {
+define <4 x i32> @testv4i32(<4 x i32> %in) nounwind {
; SSE2-LABEL: testv4i32:
; SSE2: # BB#0:
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
ret <4 x i32> %out
}
-define <4 x i32> @testv4i32u(<4 x i32> %in) {
+define <4 x i32> @testv4i32u(<4 x i32> %in) nounwind {
; SSE2-LABEL: testv4i32u:
; SSE2: # BB#0:
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
ret <4 x i32> %out
}
-define <8 x i16> @testv8i16(<8 x i16> %in) {
+define <8 x i16> @testv8i16(<8 x i16> %in) nounwind {
; SSE2-LABEL: testv8i16:
; SSE2: # BB#0:
; SSE2-NEXT: pextrw $7, %xmm0, %eax
ret <8 x i16> %out
}
-define <8 x i16> @testv8i16u(<8 x i16> %in) {
+define <8 x i16> @testv8i16u(<8 x i16> %in) nounwind {
; SSE2-LABEL: testv8i16u:
; SSE2: # BB#0:
; SSE2-NEXT: pextrw $7, %xmm0, %eax
ret <8 x i16> %out
}
-define <16 x i8> @testv16i8(<16 x i8> %in) {
+define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
; SSE2-LABEL: testv16i8:
; SSE2: # BB#0:
-; SSE2: pushq %rbp
-; SSE2: movaps %xmm0, -24(%rsp)
-; SSE2-NEXT: movzbl -9(%rsp), %eax
+; SSE2-NEXT: pushq %rbp
+; SSE2-NEXT: pushq %rbx
+; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: bsrl %eax, %ecx
; SSE2-NEXT: movl $15, %eax
; SSE2-NEXT: cmovel %eax, %ecx
; SSE2-NEXT: xorl $7, %ecx
; SSE2-NEXT: movd %ecx, %xmm0
-; SSE2-NEXT: movzbl -10(%rsp), %ebx
-; SSE2-NEXT: movzbl -11(%rsp), %edi
-; SSE2-NEXT: movzbl -12(%rsp), %r9d
-; SSE2-NEXT: movzbl -13(%rsp), %edx
-; SSE2-NEXT: movzbl -14(%rsp), %r11d
-; SSE2-NEXT: movzbl -15(%rsp), %esi
-; SSE2-NEXT: movzbl -16(%rsp), %r8d
-; SSE2-NEXT: movzbl -17(%rsp), %ecx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edi
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r9d
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r11d
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r8d
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
; SSE2-NEXT: bsrl %ecx, %ecx
; SSE2-NEXT: cmovel %eax, %ecx
; SSE2-NEXT: xorl $7, %ecx
; SSE2-NEXT: cmovel %eax, %ecx
; SSE2-NEXT: xorl $7, %ecx
; SSE2-NEXT: movd %ecx, %xmm2
-; SSE2-NEXT: movzbl -18(%rsp), %edx
-; SSE2-NEXT: movzbl -19(%rsp), %ecx
-; SSE2-NEXT: movzbl -20(%rsp), %r10d
-; SSE2-NEXT: movzbl -21(%rsp), %ebp
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r10d
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebp
; SSE2-NEXT: bsrl %ebp, %ebp
; SSE2-NEXT: cmovel %eax, %ebp
; SSE2-NEXT: xorl $7, %ebp
; SSE2-NEXT: cmovel %eax, %ecx
; SSE2-NEXT: xorl $7, %ecx
; SSE2-NEXT: movd %ecx, %xmm3
-; SSE2-NEXT: movzbl -22(%rsp), %esi
-; SSE2-NEXT: movzbl -23(%rsp), %ecx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
; SSE2-NEXT: bsrl %ecx, %ecx
; SSE2-NEXT: cmovel %eax, %ecx
; SSE2-NEXT: xorl $7, %ecx
; SSE2-NEXT: cmovel %eax, %ecx
; SSE2-NEXT: xorl $7, %ecx
; SSE2-NEXT: movd %ecx, %xmm4
-; SSE2-NEXT: movzbl -24(%rsp), %ecx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
; SSE2-NEXT: bsrl %ecx, %ecx
; SSE2-NEXT: cmovel %eax, %ecx
; SSE2-NEXT: xorl $7, %ecx
;
; SSE3-LABEL: testv16i8:
; SSE3: # BB#0:
-; SSE3: pushq %rbp
-; SSE3: movaps %xmm0, -24(%rsp)
-; SSE3-NEXT: movzbl -9(%rsp), %eax
+; SSE3-NEXT: pushq %rbp
+; SSE3-NEXT: pushq %rbx
+; SSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; SSE3-NEXT: bsrl %eax, %ecx
; SSE3-NEXT: movl $15, %eax
; SSE3-NEXT: cmovel %eax, %ecx
; SSE3-NEXT: xorl $7, %ecx
; SSE3-NEXT: movd %ecx, %xmm0
-; SSE3-NEXT: movzbl -10(%rsp), %ebx
-; SSE3-NEXT: movzbl -11(%rsp), %edi
-; SSE3-NEXT: movzbl -12(%rsp), %r9d
-; SSE3-NEXT: movzbl -13(%rsp), %edx
-; SSE3-NEXT: movzbl -14(%rsp), %r11d
-; SSE3-NEXT: movzbl -15(%rsp), %esi
-; SSE3-NEXT: movzbl -16(%rsp), %r8d
-; SSE3-NEXT: movzbl -17(%rsp), %ecx
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebx
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edi
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r9d
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r11d
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r8d
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
; SSE3-NEXT: bsrl %ecx, %ecx
; SSE3-NEXT: cmovel %eax, %ecx
; SSE3-NEXT: xorl $7, %ecx
; SSE3-NEXT: cmovel %eax, %ecx
; SSE3-NEXT: xorl $7, %ecx
; SSE3-NEXT: movd %ecx, %xmm2
-; SSE3-NEXT: movzbl -18(%rsp), %edx
-; SSE3-NEXT: movzbl -19(%rsp), %ecx
-; SSE3-NEXT: movzbl -20(%rsp), %r10d
-; SSE3-NEXT: movzbl -21(%rsp), %ebp
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r10d
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebp
; SSE3-NEXT: bsrl %ebp, %ebp
; SSE3-NEXT: cmovel %eax, %ebp
; SSE3-NEXT: xorl $7, %ebp
; SSE3-NEXT: cmovel %eax, %ecx
; SSE3-NEXT: xorl $7, %ecx
; SSE3-NEXT: movd %ecx, %xmm3
-; SSE3-NEXT: movzbl -22(%rsp), %esi
-; SSE3-NEXT: movzbl -23(%rsp), %ecx
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
; SSE3-NEXT: bsrl %ecx, %ecx
; SSE3-NEXT: cmovel %eax, %ecx
; SSE3-NEXT: xorl $7, %ecx
; SSE3-NEXT: cmovel %eax, %ecx
; SSE3-NEXT: xorl $7, %ecx
; SSE3-NEXT: movd %ecx, %xmm4
-; SSE3-NEXT: movzbl -24(%rsp), %ecx
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
; SSE3-NEXT: bsrl %ecx, %ecx
; SSE3-NEXT: cmovel %eax, %ecx
; SSE3-NEXT: xorl $7, %ecx
;
; SSSE3-LABEL: testv16i8:
; SSSE3: # BB#0:
-; SSSE3: pushq %rbp
-; SSSE3: movaps %xmm0, -24(%rsp)
-; SSSE3-NEXT: movzbl -9(%rsp), %eax
+; SSSE3-NEXT: pushq %rbp
+; SSSE3-NEXT: pushq %rbx
+; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; SSSE3-NEXT: bsrl %eax, %ecx
; SSSE3-NEXT: movl $15, %eax
; SSSE3-NEXT: cmovel %eax, %ecx
; SSSE3-NEXT: xorl $7, %ecx
; SSSE3-NEXT: movd %ecx, %xmm0
-; SSSE3-NEXT: movzbl -10(%rsp), %ebx
-; SSSE3-NEXT: movzbl -11(%rsp), %edi
-; SSSE3-NEXT: movzbl -12(%rsp), %r9d
-; SSSE3-NEXT: movzbl -13(%rsp), %edx
-; SSSE3-NEXT: movzbl -14(%rsp), %r11d
-; SSSE3-NEXT: movzbl -15(%rsp), %esi
-; SSSE3-NEXT: movzbl -16(%rsp), %r8d
-; SSSE3-NEXT: movzbl -17(%rsp), %ecx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edi
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r9d
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r11d
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r8d
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
; SSSE3-NEXT: bsrl %ecx, %ecx
; SSSE3-NEXT: cmovel %eax, %ecx
; SSSE3-NEXT: xorl $7, %ecx
; SSSE3-NEXT: cmovel %eax, %ecx
; SSSE3-NEXT: xorl $7, %ecx
; SSSE3-NEXT: movd %ecx, %xmm2
-; SSSE3-NEXT: movzbl -18(%rsp), %edx
-; SSSE3-NEXT: movzbl -19(%rsp), %ecx
-; SSSE3-NEXT: movzbl -20(%rsp), %r10d
-; SSSE3-NEXT: movzbl -21(%rsp), %ebp
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r10d
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebp
; SSSE3-NEXT: bsrl %ebp, %ebp
; SSSE3-NEXT: cmovel %eax, %ebp
; SSSE3-NEXT: xorl $7, %ebp
; SSSE3-NEXT: cmovel %eax, %ecx
; SSSE3-NEXT: xorl $7, %ecx
; SSSE3-NEXT: movd %ecx, %xmm3
-; SSSE3-NEXT: movzbl -22(%rsp), %esi
-; SSSE3-NEXT: movzbl -23(%rsp), %ecx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
; SSSE3-NEXT: bsrl %ecx, %ecx
; SSSE3-NEXT: cmovel %eax, %ecx
; SSSE3-NEXT: xorl $7, %ecx
; SSSE3-NEXT: cmovel %eax, %ecx
; SSSE3-NEXT: xorl $7, %ecx
; SSSE3-NEXT: movd %ecx, %xmm4
-; SSSE3-NEXT: movzbl -24(%rsp), %ecx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
; SSSE3-NEXT: bsrl %ecx, %ecx
; SSSE3-NEXT: cmovel %eax, %ecx
; SSSE3-NEXT: xorl $7, %ecx
ret <16 x i8> %out
}
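; There is no byte-wide bsr, so the <16 x i8> lowering round-trips through the
; stack: movaps spills the vector, each lane is reloaded with movzbl and
; counted with bsrl, and the movl $15 sentinel feeds the cmovel guard (15 xor
; 7 = 8, the ctlz of a zero byte). The exact spill offsets depend on frame
; layout, hence the -{{[0-9]+}}(%rsp) regex, and with nounwind in place the
; pushq lines can be pinned with -NEXT instead of the looser bare-prefix
; checks above them.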
-define <16 x i8> @testv16i8u(<16 x i8> %in) {
+define <16 x i8> @testv16i8u(<16 x i8> %in) nounwind {
; SSE2-LABEL: testv16i8u:
; SSE2: # BB#0:
-; SSE2: pushq %rbx
-; SSE2: movaps %xmm0, -16(%rsp)
-; SSE2-NEXT: movzbl -1(%rsp), %eax
+; SSE2-NEXT: pushq %rbx
+; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: bsrl %eax, %eax
; SSE2-NEXT: xorl $7, %eax
; SSE2-NEXT: movd %eax, %xmm0
-; SSE2-NEXT: movzbl -2(%rsp), %edi
-; SSE2-NEXT: movzbl -3(%rsp), %edx
-; SSE2-NEXT: movzbl -4(%rsp), %r9d
-; SSE2-NEXT: movzbl -5(%rsp), %eax
-; SSE2-NEXT: movzbl -6(%rsp), %r10d
-; SSE2-NEXT: movzbl -7(%rsp), %ecx
-; SSE2-NEXT: movzbl -8(%rsp), %r8d
-; SSE2-NEXT: movzbl -9(%rsp), %esi
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edi
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r9d
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r10d
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r8d
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi
; SSE2-NEXT: bsrl %esi, %esi
; SSE2-NEXT: xorl $7, %esi
; SSE2-NEXT: movd %esi, %xmm1
; SSE2-NEXT: bsrl %eax, %eax
; SSE2-NEXT: xorl $7, %eax
; SSE2-NEXT: movd %eax, %xmm0
-; SSE2-NEXT: movzbl -10(%rsp), %eax
-; SSE2-NEXT: movzbl -11(%rsp), %esi
-; SSE2-NEXT: movzbl -12(%rsp), %r11d
-; SSE2-NEXT: movzbl -13(%rsp), %ebx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r11d
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebx
; SSE2-NEXT: bsrl %ebx, %ebx
; SSE2-NEXT: xorl $7, %ebx
; SSE2-NEXT: movd %ebx, %xmm2
; SSE2-NEXT: bsrl %ecx, %ecx
; SSE2-NEXT: xorl $7, %ecx
; SSE2-NEXT: movd %ecx, %xmm0
-; SSE2-NEXT: movzbl -14(%rsp), %ecx
-; SSE2-NEXT: movzbl -15(%rsp), %edx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
; SSE2-NEXT: bsrl %edx, %edx
; SSE2-NEXT: xorl $7, %edx
; SSE2-NEXT: movd %edx, %xmm1
; SSE2-NEXT: bsrl %r8d, %eax
; SSE2-NEXT: xorl $7, %eax
; SSE2-NEXT: movd %eax, %xmm4
-; SSE2-NEXT: movzbl -16(%rsp), %eax
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: bsrl %eax, %eax
; SSE2-NEXT: xorl $7, %eax
; SSE2-NEXT: movd %eax, %xmm0
;
; SSE3-LABEL: testv16i8u:
; SSE3: # BB#0:
-; SSE3: pushq %rbx
-; SSE3: movaps %xmm0, -16(%rsp)
-; SSE3-NEXT: movzbl -1(%rsp), %eax
+; SSE3-NEXT: pushq %rbx
+; SSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; SSE3-NEXT: bsrl %eax, %eax
; SSE3-NEXT: xorl $7, %eax
; SSE3-NEXT: movd %eax, %xmm0
-; SSE3-NEXT: movzbl -2(%rsp), %edi
-; SSE3-NEXT: movzbl -3(%rsp), %edx
-; SSE3-NEXT: movzbl -4(%rsp), %r9d
-; SSE3-NEXT: movzbl -5(%rsp), %eax
-; SSE3-NEXT: movzbl -6(%rsp), %r10d
-; SSE3-NEXT: movzbl -7(%rsp), %ecx
-; SSE3-NEXT: movzbl -8(%rsp), %r8d
-; SSE3-NEXT: movzbl -9(%rsp), %esi
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edi
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r9d
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r10d
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r8d
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi
; SSE3-NEXT: bsrl %esi, %esi
; SSE3-NEXT: xorl $7, %esi
; SSE3-NEXT: movd %esi, %xmm1
; SSE3-NEXT: bsrl %eax, %eax
; SSE3-NEXT: xorl $7, %eax
; SSE3-NEXT: movd %eax, %xmm0
-; SSE3-NEXT: movzbl -10(%rsp), %eax
-; SSE3-NEXT: movzbl -11(%rsp), %esi
-; SSE3-NEXT: movzbl -12(%rsp), %r11d
-; SSE3-NEXT: movzbl -13(%rsp), %ebx
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r11d
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebx
; SSE3-NEXT: bsrl %ebx, %ebx
; SSE3-NEXT: xorl $7, %ebx
; SSE3-NEXT: movd %ebx, %xmm2
; SSE3-NEXT: bsrl %ecx, %ecx
; SSE3-NEXT: xorl $7, %ecx
; SSE3-NEXT: movd %ecx, %xmm0
-; SSE3-NEXT: movzbl -14(%rsp), %ecx
-; SSE3-NEXT: movzbl -15(%rsp), %edx
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
; SSE3-NEXT: bsrl %edx, %edx
; SSE3-NEXT: xorl $7, %edx
; SSE3-NEXT: movd %edx, %xmm1
; SSE3-NEXT: bsrl %r8d, %eax
; SSE3-NEXT: xorl $7, %eax
; SSE3-NEXT: movd %eax, %xmm4
-; SSE3-NEXT: movzbl -16(%rsp), %eax
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; SSE3-NEXT: bsrl %eax, %eax
; SSE3-NEXT: xorl $7, %eax
; SSE3-NEXT: movd %eax, %xmm0
;
; SSSE3-LABEL: testv16i8u:
; SSSE3: # BB#0:
-; SSSE3: pushq %rbx
-; SSSE3: movaps %xmm0, -16(%rsp)
-; SSSE3-NEXT: movzbl -1(%rsp), %eax
+; SSSE3-NEXT: pushq %rbx
+; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; SSSE3-NEXT: bsrl %eax, %eax
; SSSE3-NEXT: xorl $7, %eax
; SSSE3-NEXT: movd %eax, %xmm0
-; SSSE3-NEXT: movzbl -2(%rsp), %edi
-; SSSE3-NEXT: movzbl -3(%rsp), %edx
-; SSSE3-NEXT: movzbl -4(%rsp), %r9d
-; SSSE3-NEXT: movzbl -5(%rsp), %eax
-; SSSE3-NEXT: movzbl -6(%rsp), %r10d
-; SSSE3-NEXT: movzbl -7(%rsp), %ecx
-; SSSE3-NEXT: movzbl -8(%rsp), %r8d
-; SSSE3-NEXT: movzbl -9(%rsp), %esi
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edi
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r9d
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r10d
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r8d
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi
; SSSE3-NEXT: bsrl %esi, %esi
; SSSE3-NEXT: xorl $7, %esi
; SSSE3-NEXT: movd %esi, %xmm1
; SSSE3-NEXT: bsrl %eax, %eax
; SSSE3-NEXT: xorl $7, %eax
; SSSE3-NEXT: movd %eax, %xmm0
-; SSSE3-NEXT: movzbl -10(%rsp), %eax
-; SSSE3-NEXT: movzbl -11(%rsp), %esi
-; SSSE3-NEXT: movzbl -12(%rsp), %r11d
-; SSSE3-NEXT: movzbl -13(%rsp), %ebx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r11d
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebx
; SSSE3-NEXT: bsrl %ebx, %ebx
; SSSE3-NEXT: xorl $7, %ebx
; SSSE3-NEXT: movd %ebx, %xmm2
; SSSE3-NEXT: bsrl %ecx, %ecx
; SSSE3-NEXT: xorl $7, %ecx
; SSSE3-NEXT: movd %ecx, %xmm0
-; SSSE3-NEXT: movzbl -14(%rsp), %ecx
-; SSSE3-NEXT: movzbl -15(%rsp), %edx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
; SSSE3-NEXT: bsrl %edx, %edx
; SSSE3-NEXT: xorl $7, %edx
; SSSE3-NEXT: movd %edx, %xmm1
; SSSE3-NEXT: bsrl %r8d, %eax
; SSSE3-NEXT: xorl $7, %eax
; SSSE3-NEXT: movd %eax, %xmm4
-; SSSE3-NEXT: movzbl -16(%rsp), %eax
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; SSSE3-NEXT: bsrl %eax, %eax
; SSSE3-NEXT: xorl $7, %eax
; SSSE3-NEXT: movd %eax, %xmm0
ret <16 x i8> %out
}
-define <2 x i64> @foldv2i64() {
+define <2 x i64> @foldv2i64() nounwind {
; SSE-LABEL: foldv2i64:
; SSE: # BB#0:
; SSE-NEXT: movl $55, %eax
ret <2 x i64> %out
}
-define <2 x i64> @foldv2i64u() {
+define <2 x i64> @foldv2i64u() nounwind {
; SSE-LABEL: foldv2i64u:
; SSE: # BB#0:
; SSE-NEXT: movl $55, %eax
ret <2 x i64> %out
}
-define <4 x i32> @foldv4i32() {
+define <4 x i32> @foldv4i32() nounwind {
; SSE-LABEL: foldv4i32:
; SSE: # BB#0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [23,0,32,24]
ret <4 x i32> %out
}
-define <4 x i32> @foldv4i32u() {
+define <4 x i32> @foldv4i32u() nounwind {
; SSE-LABEL: foldv4i32u:
; SSE: # BB#0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [23,0,32,24]
ret <4 x i32> %out
}
-define <8 x i16> @foldv8i16() {
+define <8 x i16> @foldv8i16() nounwind {
; SSE-LABEL: foldv8i16:
; SSE: # BB#0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9]
ret <8 x i16> %out
}
-define <8 x i16> @foldv8i16u() {
+define <8 x i16> @foldv8i16u() nounwind {
; SSE-LABEL: foldv8i16u:
; SSE: # BB#0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9]
ret <8 x i16> %out
}
-define <16 x i8> @foldv16i8() {
+define <16 x i8> @foldv16i8() nounwind {
; SSE-LABEL: foldv16i8:
; SSE: # BB#0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2]
ret <16 x i8> %out
}
-define <16 x i8> @foldv16i8u() {
+define <16 x i8> @foldv16i8u() nounwind {
; SSE-LABEL: foldv16i8u:
; SSE: # BB#0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2]
target triple = "x86_64-unknown-unknown"
-define <4 x i64> @testv4i64(<4 x i64> %in) {
+define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {
; AVX1-LABEL: testv4i64:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
ret <4 x i64> %out
}
-define <4 x i64> @testv4i64u(<4 x i64> %in) {
+define <4 x i64> @testv4i64u(<4 x i64> %in) nounwind {
; AVX1-LABEL: testv4i64u:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
ret <4 x i64> %out
}
-define <8 x i32> @testv8i32(<8 x i32> %in) {
+define <8 x i32> @testv8i32(<8 x i32> %in) nounwind {
; AVX1-LABEL: testv8i32:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
ret <8 x i32> %out
}
-define <8 x i32> @testv8i32u(<8 x i32> %in) {
+define <8 x i32> @testv8i32u(<8 x i32> %in) nounwind {
; AVX1-LABEL: testv8i32u:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
ret <8 x i32> %out
}
-define <16 x i16> @testv16i16(<16 x i16> %in) {
+define <16 x i16> @testv16i16(<16 x i16> %in) nounwind {
; AVX1-LABEL: testv16i16:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
ret <16 x i16> %out
}
-define <16 x i16> @testv16i16u(<16 x i16> %in) {
+define <16 x i16> @testv16i16u(<16 x i16> %in) nounwind {
; AVX1-LABEL: testv16i16u:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
ret <16 x i16> %out
}
-define <32 x i8> @testv32i8(<32 x i8> %in) {
+define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
; AVX1-LABEL: testv32i8:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
ret <32 x i8> %out
}
-define <32 x i8> @testv32i8u(<32 x i8> %in) {
+define <32 x i8> @testv32i8u(<32 x i8> %in) nounwind {
; AVX1-LABEL: testv32i8u:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
ret <32 x i8> %out
}
-define <4 x i64> @foldv4i64() {
-; AVX-LABEL: foldv4i64:
-; AVX: # BB#0:
-; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [55,0,64,56]
-; AVX-NEXT: retq
+define <4 x i64> @foldv4i64() nounwind {
+; ALL-LABEL: foldv4i64:
+; ALL: # BB#0:
+; ALL-NEXT: vmovaps {{.*#+}} ymm0 = [55,0,64,56]
+; ALL-NEXT: retq
%out = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> <i64 256, i64 -1, i64 0, i64 255>, i1 0)
ret <4 x i64> %out
}
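; With constant operands the ctlz call folds away and only a constant load
; remains. For <i64 256, i64 -1, i64 0, i64 255>: ctlz(256) = 55, ctlz(-1) =
; 0, ctlz(0) = 64 (defined, since the flag is i1 0), ctlz(255) = 56, i.e. the
; [55,0,64,56] vector checked above. The prefix switches from AVX to ALL,
; presumably so the RUN lines can share one common check prefix now that AVX1
; and AVX2 emit identical code for these folds.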
-define <4 x i64> @foldv4i64u() {
-; AVX-LABEL: foldv4i64u:
-; AVX: # BB#0:
-; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [55,0,64,56]
-; AVX-NEXT: retq
+define <4 x i64> @foldv4i64u() nounwind {
+; ALL-LABEL: foldv4i64u:
+; ALL: # BB#0:
+; ALL-NEXT: vmovaps {{.*#+}} ymm0 = [55,0,64,56]
+; ALL-NEXT: retq
%out = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> <i64 256, i64 -1, i64 0, i64 255>, i1 -1)
ret <4 x i64> %out
}
-define <8 x i32> @foldv8i32() {
-; AVX-LABEL: foldv8i32:
-; AVX: # BB#0:
-; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [23,0,32,24,0,29,27,25]
-; AVX-NEXT: retq
+define <8 x i32> @foldv8i32() nounwind {
+; ALL-LABEL: foldv8i32:
+; ALL: # BB#0:
+; ALL-NEXT: vmovaps {{.*#+}} ymm0 = [23,0,32,24,0,29,27,25]
+; ALL-NEXT: retq
%out = call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> <i32 256, i32 -1, i32 0, i32 255, i32 -65536, i32 7, i32 24, i32 88>, i1 0)
ret <8 x i32> %out
}
-define <8 x i32> @foldv8i32u() {
-; AVX-LABEL: foldv8i32u:
-; AVX: # BB#0:
-; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [23,0,32,24,0,29,27,25]
-; AVX-NEXT: retq
+define <8 x i32> @foldv8i32u() nounwind {
+; ALL-LABEL: foldv8i32u:
+; ALL: # BB#0:
+; ALL-NEXT: vmovaps {{.*#+}} ymm0 = [23,0,32,24,0,29,27,25]
+; ALL-NEXT: retq
%out = call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> <i32 256, i32 -1, i32 0, i32 255, i32 -65536, i32 7, i32 24, i32 88>, i1 -1)
ret <8 x i32> %out
}
-define <16 x i16> @foldv16i16() {
-; AVX-LABEL: foldv16i16:
-; AVX: # BB#0:
-; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [7,0,16,8,16,13,11,9,0,8,15,14,13,12,11,10]
-; AVX-NEXT: retq
+define <16 x i16> @foldv16i16() nounwind {
+; ALL-LABEL: foldv16i16:
+; ALL: # BB#0:
+; ALL-NEXT: vmovaps {{.*#+}} ymm0 = [7,0,16,8,16,13,11,9,0,8,15,14,13,12,11,10]
+; ALL-NEXT: retq
%out = call <16 x i16> @llvm.ctlz.v16i16(<16 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88, i16 -2, i16 254, i16 1, i16 2, i16 4, i16 8, i16 16, i16 32>, i1 0)
ret <16 x i16> %out
}
-define <16 x i16> @foldv16i16u() {
-; AVX-LABEL: foldv16i16u:
-; AVX: # BB#0:
-; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [7,0,16,8,16,13,11,9,0,8,15,14,13,12,11,10]
-; AVX-NEXT: retq
+define <16 x i16> @foldv16i16u() nounwind {
+; ALL-LABEL: foldv16i16u:
+; ALL: # BB#0:
+; ALL-NEXT: vmovaps {{.*#+}} ymm0 = [7,0,16,8,16,13,11,9,0,8,15,14,13,12,11,10]
+; ALL-NEXT: retq
%out = call <16 x i16> @llvm.ctlz.v16i16(<16 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88, i16 -2, i16 254, i16 1, i16 2, i16 4, i16 8, i16 16, i16 32>, i1 -1)
ret <16 x i16> %out
}
-define <32 x i8> @foldv32i8() {
-; AVX-LABEL: foldv32i8:
-; AVX: # BB#0:
-; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2,1,0,8,8,0,0,0,0,0,0,0,0,6,5,5,1]
-; AVX-NEXT: retq
+define <32 x i8> @foldv32i8() nounwind {
+; ALL-LABEL: foldv32i8:
+; ALL: # BB#0:
+; ALL-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2,1,0,8,8,0,0,0,0,0,0,0,0,6,5,5,1]
+; ALL-NEXT: retq
%out = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32, i8 64, i8 128, i8 256, i8 -256, i8 -128, i8 -64, i8 -32, i8 -16, i8 -8, i8 -4, i8 -2, i8 -1, i8 3, i8 5, i8 7, i8 127>, i1 0)
ret <32 x i8> %out
}
-define <32 x i8> @foldv32i8u() {
-; AVX-LABEL: foldv32i8u:
-; AVX: # BB#0:
-; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2,1,0,8,8,0,0,0,0,0,0,0,0,6,5,5,1]
-; AVX-NEXT: retq
+define <32 x i8> @foldv32i8u() nounwind {
+; ALL-LABEL: foldv32i8u:
+; ALL: # BB#0:
+; ALL-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2,1,0,8,8,0,0,0,0,0,0,0,0,6,5,5,1]
+; ALL-NEXT: retq
%out = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32, i8 64, i8 128, i8 256, i8 -256, i8 -128, i8 -64, i8 -32, i8 -16, i8 -8, i8 -4, i8 -2, i8 -1, i8 3, i8 5, i8 7, i8 127>, i1 -1)
ret <32 x i8> %out
}
target triple = "x86_64-unknown-unknown"
-define <2 x i64> @testv2i64(<2 x i64> %in) {
+define <2 x i64> @testv2i64(<2 x i64> %in) nounwind {
; SSE2-LABEL: testv2i64:
; SSE2: # BB#0:
; SSE2-NEXT: movd %xmm0, %rax
ret <2 x i64> %out
}
-define <2 x i64> @testv2i64u(<2 x i64> %in) {
+define <2 x i64> @testv2i64u(<2 x i64> %in) nounwind {
; SSE2-LABEL: testv2i64u:
; SSE2: # BB#0:
; SSE2-NEXT: movd %xmm0, %rax
ret <2 x i64> %out
}
-define <4 x i32> @testv4i32(<4 x i32> %in) {
+define <4 x i32> @testv4i32(<4 x i32> %in) nounwind {
; SSE2-LABEL: testv4i32:
; SSE2: # BB#0:
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
ret <4 x i32> %out
}
-define <4 x i32> @testv4i32u(<4 x i32> %in) {
+define <4 x i32> @testv4i32u(<4 x i32> %in) nounwind {
; SSE2-LABEL: testv4i32u:
; SSE2: # BB#0:
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
ret <4 x i32> %out
}
-define <8 x i16> @testv8i16(<8 x i16> %in) {
+define <8 x i16> @testv8i16(<8 x i16> %in) nounwind {
; SSE2-LABEL: testv8i16:
; SSE2: # BB#0:
; SSE2-NEXT: pextrw $7, %xmm0, %eax
ret <8 x i16> %out
}
-define <8 x i16> @testv8i16u(<8 x i16> %in) {
+define <8 x i16> @testv8i16u(<8 x i16> %in) nounwind {
; SSE2-LABEL: testv8i16u:
; SSE2: # BB#0:
; SSE2-NEXT: pextrw $7, %xmm0, %eax
;
; SSE41-LABEL: testv8i16u:
; SSE41: # BB#0:
-; SSE41-NEXT: pextrw $1, %xmm0, %eax
-; SSE41-NEXT: bsfw %ax, %ax
-; SSE41-NEXT: movd %xmm0, %ecx
-; SSE41-NEXT: bsfw %cx, %cx
-; SSE41-NEXT: movd %ecx, %xmm1
-; SSE41-NEXT: pinsrw $1, %eax, %xmm1
-; SSE41-NEXT: pextrw $2, %xmm0, %eax
-; SSE41-NEXT: bsfw %ax, %ax
-; SSE41-NEXT: pinsrw $2, %eax, %xmm1
-; SSE41-NEXT: pextrw $3, %xmm0, %eax
-; SSE41-NEXT: bsfw %ax, %ax
-; SSE41-NEXT: pinsrw $3, %eax, %xmm1
-; SSE41-NEXT: pextrw $4, %xmm0, %eax
-; SSE41-NEXT: bsfw %ax, %ax
-; SSE41-NEXT: pinsrw $4, %eax, %xmm1
-; SSE41-NEXT: pextrw $5, %xmm0, %eax
-; SSE41-NEXT: bsfw %ax, %ax
-; SSE41-NEXT: pinsrw $5, %eax, %xmm1
-; SSE41-NEXT: pextrw $6, %xmm0, %eax
-; SSE41-NEXT: bsfw %ax, %ax
-; SSE41-NEXT: pinsrw $6, %eax, %xmm1
-; SSE41-NEXT: pextrw $7, %xmm0, %eax
-; SSE41-NEXT: bsfw %ax, %ax
-; SSE41-NEXT: pinsrw $7, %eax, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: retq
+; SSE41-NEXT: pextrw $1, %xmm0, %eax
+; SSE41-NEXT: bsfw %ax, %ax
+; SSE41-NEXT: movd %xmm0, %ecx
+; SSE41-NEXT: bsfw %cx, %cx
+; SSE41-NEXT: movd %ecx, %xmm1
+; SSE41-NEXT: pinsrw $1, %eax, %xmm1
+; SSE41-NEXT: pextrw $2, %xmm0, %eax
+; SSE41-NEXT: bsfw %ax, %ax
+; SSE41-NEXT: pinsrw $2, %eax, %xmm1
+; SSE41-NEXT: pextrw $3, %xmm0, %eax
+; SSE41-NEXT: bsfw %ax, %ax
+; SSE41-NEXT: pinsrw $3, %eax, %xmm1
+; SSE41-NEXT: pextrw $4, %xmm0, %eax
+; SSE41-NEXT: bsfw %ax, %ax
+; SSE41-NEXT: pinsrw $4, %eax, %xmm1
+; SSE41-NEXT: pextrw $5, %xmm0, %eax
+; SSE41-NEXT: bsfw %ax, %ax
+; SSE41-NEXT: pinsrw $5, %eax, %xmm1
+; SSE41-NEXT: pextrw $6, %xmm0, %eax
+; SSE41-NEXT: bsfw %ax, %ax
+; SSE41-NEXT: pinsrw $6, %eax, %xmm1
+; SSE41-NEXT: pextrw $7, %xmm0, %eax
+; SSE41-NEXT: bsfw %ax, %ax
+; SSE41-NEXT: pinsrw $7, %eax, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
;
; AVX-LABEL: testv8i16u:
; AVX: # BB#0:
ret <8 x i16> %out
}
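; For cttz, bsfw already yields the trailing-zero index directly, so the SSE41
; lowering is a plain pextrw/bsfw/pinsrw chain per lane; the "u" variant needs
; no zero guard because cttz(0) is undef here. Sketch of the presumed body:
define <8 x i16> @cttz_v8i16_undef_sketch(<8 x i16> %in) nounwind {
  %out = call <8 x i16> @llvm.cttz.v8i16(<8 x i16> %in, i1 -1) ; i1 -1: zero input is undef
  ret <8 x i16> %out
}
declare <8 x i16> @llvm.cttz.v8i16(<8 x i16>, i1)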
-define <16 x i8> @testv16i8(<16 x i8> %in) {
+define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
; SSE2-LABEL: testv16i8:
; SSE2: # BB#0:
-; SSE2: pushq %rbp
-; SSE2: pushq %r14
-; SSE2: pushq %rbx
-; SSE2: movaps %xmm0, -16(%rsp)
-; SSE2-NEXT: movzbl -1(%rsp), %eax
+; SSE2-NEXT: pushq %rbp
+; SSE2-NEXT: pushq %r14
+; SSE2-NEXT: pushq %rbx
+; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: bsfl %eax, %edx
; SSE2-NEXT: movl $32, %eax
; SSE2-NEXT: cmovel %eax, %edx
; SSE2-NEXT: movl $8, %ecx
; SSE2-NEXT: cmovel %ecx, %edx
; SSE2-NEXT: movd %edx, %xmm0
-; SSE2-NEXT: movzbl -2(%rsp), %r14d
-; SSE2-NEXT: movzbl -3(%rsp), %ebx
-; SSE2-NEXT: movzbl -4(%rsp), %r9d
-; SSE2-NEXT: movzbl -5(%rsp), %edi
-; SSE2-NEXT: movzbl -6(%rsp), %r11d
-; SSE2-NEXT: movzbl -7(%rsp), %edx
-; SSE2-NEXT: movzbl -8(%rsp), %r8d
-; SSE2-NEXT: movzbl -9(%rsp), %esi
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r14d
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r9d
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edi
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r11d
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r8d
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi
; SSE2-NEXT: bsfl %esi, %esi
; SSE2-NEXT: cmovel %eax, %esi
; SSE2-NEXT: cmpl $32, %esi
; SSE2-NEXT: cmpl $32, %esi
; SSE2-NEXT: cmovel %ecx, %esi
; SSE2-NEXT: movd %esi, %xmm2
-; SSE2-NEXT: movzbl -10(%rsp), %edi
-; SSE2-NEXT: movzbl -11(%rsp), %esi
-; SSE2-NEXT: movzbl -12(%rsp), %r10d
-; SSE2-NEXT: movzbl -13(%rsp), %ebp
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edi
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r10d
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebp
; SSE2-NEXT: bsfl %ebp, %ebp
; SSE2-NEXT: cmovel %eax, %ebp
; SSE2-NEXT: cmpl $32, %ebp
; SSE2-NEXT: cmpl $32, %edx
; SSE2-NEXT: cmovel %ecx, %edx
; SSE2-NEXT: movd %edx, %xmm3
-; SSE2-NEXT: movzbl -14(%rsp), %edx
-; SSE2-NEXT: movzbl -15(%rsp), %esi
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi
; SSE2-NEXT: bsfl %esi, %esi
; SSE2-NEXT: cmovel %eax, %esi
; SSE2-NEXT: cmpl $32, %esi
; SSE2-NEXT: cmpl $32, %edx
; SSE2-NEXT: cmovel %ecx, %edx
; SSE2-NEXT: movd %edx, %xmm4
-; SSE2-NEXT: movzbl -16(%rsp), %edx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
; SSE2-NEXT: bsfl %edx, %edx
; SSE2-NEXT: cmovel %eax, %edx
; SSE2-NEXT: cmpl $32, %edx
;
; SSE3-LABEL: testv16i8:
; SSE3: # BB#0:
-; SSE3: pushq %rbp
-; SSE3: pushq %r14
-; SSE3: pushq %rbx
-; SSE3: movaps %xmm0, -16(%rsp)
-; SSE3-NEXT: movzbl -1(%rsp), %eax
+; SSE3-NEXT: pushq %rbp
+; SSE3-NEXT: pushq %r14
+; SSE3-NEXT: pushq %rbx
+; SSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; SSE3-NEXT: bsfl %eax, %edx
; SSE3-NEXT: movl $32, %eax
; SSE3-NEXT: cmovel %eax, %edx
; SSE3-NEXT: movl $8, %ecx
; SSE3-NEXT: cmovel %ecx, %edx
; SSE3-NEXT: movd %edx, %xmm0
-; SSE3-NEXT: movzbl -2(%rsp), %r14d
-; SSE3-NEXT: movzbl -3(%rsp), %ebx
-; SSE3-NEXT: movzbl -4(%rsp), %r9d
-; SSE3-NEXT: movzbl -5(%rsp), %edi
-; SSE3-NEXT: movzbl -6(%rsp), %r11d
-; SSE3-NEXT: movzbl -7(%rsp), %edx
-; SSE3-NEXT: movzbl -8(%rsp), %r8d
-; SSE3-NEXT: movzbl -9(%rsp), %esi
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r14d
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebx
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r9d
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edi
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r11d
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r8d
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi
; SSE3-NEXT: bsfl %esi, %esi
; SSE3-NEXT: cmovel %eax, %esi
; SSE3-NEXT: cmpl $32, %esi
; SSE3-NEXT: cmpl $32, %esi
; SSE3-NEXT: cmovel %ecx, %esi
; SSE3-NEXT: movd %esi, %xmm2
-; SSE3-NEXT: movzbl -10(%rsp), %edi
-; SSE3-NEXT: movzbl -11(%rsp), %esi
-; SSE3-NEXT: movzbl -12(%rsp), %r10d
-; SSE3-NEXT: movzbl -13(%rsp), %ebp
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edi
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r10d
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebp
; SSE3-NEXT: bsfl %ebp, %ebp
; SSE3-NEXT: cmovel %eax, %ebp
; SSE3-NEXT: cmpl $32, %ebp
; SSE3-NEXT: cmpl $32, %edx
; SSE3-NEXT: cmovel %ecx, %edx
; SSE3-NEXT: movd %edx, %xmm3
-; SSE3-NEXT: movzbl -14(%rsp), %edx
-; SSE3-NEXT: movzbl -15(%rsp), %esi
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi
; SSE3-NEXT: bsfl %esi, %esi
; SSE3-NEXT: cmovel %eax, %esi
; SSE3-NEXT: cmpl $32, %esi
; SSE3-NEXT: cmpl $32, %edx
; SSE3-NEXT: cmovel %ecx, %edx
; SSE3-NEXT: movd %edx, %xmm4
-; SSE3-NEXT: movzbl -16(%rsp), %edx
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
; SSE3-NEXT: bsfl %edx, %edx
; SSE3-NEXT: cmovel %eax, %edx
; SSE3-NEXT: cmpl $32, %edx
;
; SSSE3-LABEL: testv16i8:
; SSSE3: # BB#0:
-; SSSE3: pushq %rbp
-; SSSE3: pushq %r14
-; SSSE3: pushq %rbx
-; SSSE3: movaps %xmm0, -16(%rsp)
-; SSSE3-NEXT: movzbl -1(%rsp), %eax
+; SSSE3-NEXT: pushq %rbp
+; SSSE3-NEXT: pushq %r14
+; SSSE3-NEXT: pushq %rbx
+; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; SSSE3-NEXT: bsfl %eax, %edx
; SSSE3-NEXT: movl $32, %eax
; SSSE3-NEXT: cmovel %eax, %edx
; SSSE3-NEXT: movl $8, %ecx
; SSSE3-NEXT: cmovel %ecx, %edx
; SSSE3-NEXT: movd %edx, %xmm0
-; SSSE3-NEXT: movzbl -2(%rsp), %r14d
-; SSSE3-NEXT: movzbl -3(%rsp), %ebx
-; SSSE3-NEXT: movzbl -4(%rsp), %r9d
-; SSSE3-NEXT: movzbl -5(%rsp), %edi
-; SSSE3-NEXT: movzbl -6(%rsp), %r11d
-; SSSE3-NEXT: movzbl -7(%rsp), %edx
-; SSSE3-NEXT: movzbl -8(%rsp), %r8d
-; SSSE3-NEXT: movzbl -9(%rsp), %esi
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r14d
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r9d
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edi
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r11d
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r8d
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi
; SSSE3-NEXT: bsfl %esi, %esi
; SSSE3-NEXT: cmovel %eax, %esi
; SSSE3-NEXT: cmpl $32, %esi
; SSSE3-NEXT: cmpl $32, %esi
; SSSE3-NEXT: cmovel %ecx, %esi
; SSSE3-NEXT: movd %esi, %xmm2
-; SSSE3-NEXT: movzbl -10(%rsp), %edi
-; SSSE3-NEXT: movzbl -11(%rsp), %esi
-; SSSE3-NEXT: movzbl -12(%rsp), %r10d
-; SSSE3-NEXT: movzbl -13(%rsp), %ebp
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edi
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r10d
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebp
; SSSE3-NEXT: bsfl %ebp, %ebp
; SSSE3-NEXT: cmovel %eax, %ebp
; SSSE3-NEXT: cmpl $32, %ebp
; SSSE3-NEXT: cmpl $32, %edx
; SSSE3-NEXT: cmovel %ecx, %edx
; SSSE3-NEXT: movd %edx, %xmm3
-; SSSE3-NEXT: movzbl -14(%rsp), %edx
-; SSSE3-NEXT: movzbl -15(%rsp), %esi
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi
; SSSE3-NEXT: bsfl %esi, %esi
; SSSE3-NEXT: cmovel %eax, %esi
; SSSE3-NEXT: cmpl $32, %esi
; SSSE3-NEXT: cmpl $32, %edx
; SSSE3-NEXT: cmovel %ecx, %edx
; SSSE3-NEXT: movd %edx, %xmm4
-; SSSE3-NEXT: movzbl -16(%rsp), %edx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
; SSSE3-NEXT: bsfl %edx, %edx
; SSSE3-NEXT: cmovel %eax, %edx
; SSSE3-NEXT: cmpl $32, %edx
ret <16 x i8> %out
}
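; The defined-on-zero byte case needs two fixups: bsfl sets ZF and leaves its
; destination undefined for a zero input, so the first cmovel substitutes the
; i32 sentinel 32, and the cmpl $32/cmovel pair then clamps that to 8, the
; correct cttz for a zero i8. Sketch of the presumed body:
define <16 x i8> @cttz_v16i8_sketch(<16 x i8> %in) nounwind {
  %out = call <16 x i8> @llvm.cttz.v16i8(<16 x i8> %in, i1 0) ; i1 0: cttz(0) is defined (= 8 per byte)
  ret <16 x i8> %out
}
declare <16 x i8> @llvm.cttz.v16i8(<16 x i8>, i1)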
-define <16 x i8> @testv16i8u(<16 x i8> %in) {
+define <16 x i8> @testv16i8u(<16 x i8> %in) nounwind {
; SSE2-LABEL: testv16i8u:
; SSE2: # BB#0:
-; SSE2: pushq %rbx
-; SSE2: movaps %xmm0, -16(%rsp)
-; SSE2-NEXT: movzbl -1(%rsp), %eax
+; SSE2-NEXT: pushq %rbx
+; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: bsfl %eax, %eax
; SSE2-NEXT: movd %eax, %xmm0
-; SSE2-NEXT: movzbl -2(%rsp), %r11d
-; SSE2-NEXT: movzbl -3(%rsp), %eax
-; SSE2-NEXT: movzbl -4(%rsp), %r9d
-; SSE2-NEXT: movzbl -5(%rsp), %edi
-; SSE2-NEXT: movzbl -6(%rsp), %r10d
-; SSE2-NEXT: movzbl -7(%rsp), %ecx
-; SSE2-NEXT: movzbl -8(%rsp), %r8d
-; SSE2-NEXT: movzbl -9(%rsp), %edx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r11d
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r9d
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edi
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r10d
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r8d
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
; SSE2-NEXT: bsfl %edx, %edx
; SSE2-NEXT: movd %edx, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: bsfl %edi, %edx
; SSE2-NEXT: movd %edx, %xmm0
-; SSE2-NEXT: movzbl -10(%rsp), %edx
-; SSE2-NEXT: movzbl -11(%rsp), %esi
-; SSE2-NEXT: movzbl -12(%rsp), %edi
-; SSE2-NEXT: movzbl -13(%rsp), %ebx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edi
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebx
; SSE2-NEXT: bsfl %ebx, %ebx
; SSE2-NEXT: movd %ebx, %xmm2
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; SSE2-NEXT: movd %eax, %xmm0
; SSE2-NEXT: bsfl %esi, %eax
; SSE2-NEXT: movd %eax, %xmm3
-; SSE2-NEXT: punpcklbw %xmm0, %xmm3 # xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
; SSE2-NEXT: bsfl %ecx, %eax
; SSE2-NEXT: movd %eax, %xmm0
-; SSE2-NEXT: movzbl -14(%rsp), %eax
-; SSE2-NEXT: movzbl -15(%rsp), %ecx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
; SSE2-NEXT: bsfl %ecx, %ecx
; SSE2-NEXT: movd %ecx, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; SSE2-NEXT: bsfl %r8d, %eax
; SSE2-NEXT: movd %eax, %xmm4
-; SSE2-NEXT: movzbl -16(%rsp), %eax
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: bsfl %eax, %eax
; SSE2-NEXT: movd %eax, %xmm0
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
;
; SSE3-LABEL: testv16i8u:
; SSE3: # BB#0:
-; SSE3: pushq %rbx
-; SSE3: movaps %xmm0, -16(%rsp)
-; SSE3-NEXT: movzbl -1(%rsp), %eax
+; SSE3-NEXT: pushq %rbx
+; SSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; SSE3-NEXT: bsfl %eax, %eax
; SSE3-NEXT: movd %eax, %xmm0
-; SSE3-NEXT: movzbl -2(%rsp), %r11d
-; SSE3-NEXT: movzbl -3(%rsp), %eax
-; SSE3-NEXT: movzbl -4(%rsp), %r9d
-; SSE3-NEXT: movzbl -5(%rsp), %edi
-; SSE3-NEXT: movzbl -6(%rsp), %r10d
-; SSE3-NEXT: movzbl -7(%rsp), %ecx
-; SSE3-NEXT: movzbl -8(%rsp), %r8d
-; SSE3-NEXT: movzbl -9(%rsp), %edx
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r11d
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r9d
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edi
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r10d
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r8d
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
; SSE3-NEXT: bsfl %edx, %edx
; SSE3-NEXT: movd %edx, %xmm1
; SSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE3-NEXT: bsfl %edi, %edx
; SSE3-NEXT: movd %edx, %xmm0
-; SSE3-NEXT: movzbl -10(%rsp), %edx
-; SSE3-NEXT: movzbl -11(%rsp), %esi
-; SSE3-NEXT: movzbl -12(%rsp), %edi
-; SSE3-NEXT: movzbl -13(%rsp), %ebx
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edi
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebx
; SSE3-NEXT: bsfl %ebx, %ebx
; SSE3-NEXT: movd %ebx, %xmm2
; SSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; SSE3-NEXT: movd %eax, %xmm0
; SSE3-NEXT: bsfl %esi, %eax
; SSE3-NEXT: movd %eax, %xmm3
-; SSE3-NEXT: punpcklbw %xmm0, %xmm3 # xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
; SSE3-NEXT: bsfl %ecx, %eax
; SSE3-NEXT: movd %eax, %xmm0
-; SSE3-NEXT: movzbl -14(%rsp), %eax
-; SSE3-NEXT: movzbl -15(%rsp), %ecx
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
; SSE3-NEXT: bsfl %ecx, %ecx
; SSE3-NEXT: movd %ecx, %xmm1
; SSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; SSE3-NEXT: bsfl %r8d, %eax
; SSE3-NEXT: movd %eax, %xmm4
-; SSE3-NEXT: movzbl -16(%rsp), %eax
+; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; SSE3-NEXT: bsfl %eax, %eax
; SSE3-NEXT: movd %eax, %xmm0
; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
;
; SSSE3-LABEL: testv16i8u:
; SSSE3: # BB#0:
-; SSSE3: pushq %rbx
-; SSSE3: movaps %xmm0, -16(%rsp)
-; SSSE3-NEXT: movzbl -1(%rsp), %eax
+; SSSE3-NEXT: pushq %rbx
+; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; SSSE3-NEXT: bsfl %eax, %eax
; SSSE3-NEXT: movd %eax, %xmm0
-; SSSE3-NEXT: movzbl -2(%rsp), %r11d
-; SSSE3-NEXT: movzbl -3(%rsp), %eax
-; SSSE3-NEXT: movzbl -4(%rsp), %r9d
-; SSSE3-NEXT: movzbl -5(%rsp), %edi
-; SSSE3-NEXT: movzbl -6(%rsp), %r10d
-; SSSE3-NEXT: movzbl -7(%rsp), %ecx
-; SSSE3-NEXT: movzbl -8(%rsp), %r8d
-; SSSE3-NEXT: movzbl -9(%rsp), %edx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r11d
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r9d
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edi
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r10d
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r8d
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
; SSSE3-NEXT: bsfl %edx, %edx
; SSSE3-NEXT: movd %edx, %xmm1
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSSE3-NEXT: bsfl %edi, %edx
; SSSE3-NEXT: movd %edx, %xmm0
-; SSSE3-NEXT: movzbl -10(%rsp), %edx
-; SSSE3-NEXT: movzbl -11(%rsp), %esi
-; SSSE3-NEXT: movzbl -12(%rsp), %edi
-; SSSE3-NEXT: movzbl -13(%rsp), %ebx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edi
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebx
; SSSE3-NEXT: bsfl %ebx, %ebx
; SSSE3-NEXT: movd %ebx, %xmm2
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; SSSE3-NEXT: movd %eax, %xmm0
; SSSE3-NEXT: bsfl %esi, %eax
; SSSE3-NEXT: movd %eax, %xmm3
-; SSSE3-NEXT: punpcklbw %xmm0, %xmm3 # xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
; SSSE3-NEXT: bsfl %ecx, %eax
; SSSE3-NEXT: movd %eax, %xmm0
-; SSSE3-NEXT: movzbl -14(%rsp), %eax
-; SSSE3-NEXT: movzbl -15(%rsp), %ecx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
; SSSE3-NEXT: bsfl %ecx, %ecx
; SSSE3-NEXT: movd %ecx, %xmm1
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; SSSE3-NEXT: bsfl %r8d, %eax
; SSSE3-NEXT: movd %eax, %xmm4
-; SSSE3-NEXT: movzbl -16(%rsp), %eax
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; SSSE3-NEXT: bsfl %eax, %eax
; SSSE3-NEXT: movd %eax, %xmm0
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
ret <16 x i8> %out
}
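; The per-byte results are recombined through a tree of punpcklbw interleaves;
; rewriting those checks in the {{.*#+}} form keeps the full lane mapping
; pinned while the regex absorbs the register operands and the comment marker.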
-define <2 x i64> @foldv2i64() {
+define <2 x i64> @foldv2i64() nounwind {
; SSE-LABEL: foldv2i64:
; SSE: # BB#0:
; SSE-NEXT: movl $8, %eax
ret <2 x i64> %out
}
-define <2 x i64> @foldv2i64u() {
+define <2 x i64> @foldv2i64u() nounwind {
; SSE-LABEL: foldv2i64u:
; SSE: # BB#0:
; SSE-NEXT: movl $8, %eax
ret <2 x i64> %out
}
-define <4 x i32> @foldv4i32() {
+define <4 x i32> @foldv4i32() nounwind {
; SSE-LABEL: foldv4i32:
; SSE: # BB#0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,32,0]
ret <4 x i32> %out
}
-define <4 x i32> @foldv4i32u() {
+define <4 x i32> @foldv4i32u() nounwind {
; SSE-LABEL: foldv4i32u:
; SSE: # BB#0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,32,0]
ret <4 x i32> %out
}
-define <8 x i16> @foldv8i16() {
+define <8 x i16> @foldv8i16() nounwind {
; SSE-LABEL: foldv8i16:
; SSE: # BB#0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,16,0,16,0,3,3]
ret <8 x i16> %out
}
-define <8 x i16> @foldv8i16u() {
+define <8 x i16> @foldv8i16u() nounwind {
; SSE-LABEL: foldv8i16u:
; SSE: # BB#0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,16,0,16,0,3,3]
ret <8 x i16> %out
}
-define <16 x i8> @foldv16i8() {
+define <16 x i8> @foldv16i8() nounwind {
; SSE-LABEL: foldv16i8:
; SSE: # BB#0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5]
ret <16 x i8> %out
}
-define <16 x i8> @foldv16i8u() {
+define <16 x i8> @foldv16i8u() nounwind {
; SSE-LABEL: foldv16i8u:
; SSE: # BB#0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5]
target triple = "x86_64-unknown-unknown"
-define <4 x i64> @testv4i64(<4 x i64> %in) {
+define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {
; AVX1-LABEL: testv4i64:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
ret <4 x i64> %out
}
-define <4 x i64> @testv4i64u(<4 x i64> %in) {
+define <4 x i64> @testv4i64u(<4 x i64> %in) nounwind {
; AVX1-LABEL: testv4i64u:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
ret <4 x i64> %out
}
-define <8 x i32> @testv8i32(<8 x i32> %in) {
+define <8 x i32> @testv8i32(<8 x i32> %in) nounwind {
; AVX1-LABEL: testv8i32:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
ret <8 x i32> %out
}
-define <8 x i32> @testv8i32u(<8 x i32> %in) {
+define <8 x i32> @testv8i32u(<8 x i32> %in) nounwind {
; AVX1-LABEL: testv8i32u:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
ret <8 x i32> %out
}
-define <16 x i16> @testv16i16(<16 x i16> %in) {
+define <16 x i16> @testv16i16(<16 x i16> %in) nounwind {
; AVX1-LABEL: testv16i16:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
ret <16 x i16> %out
}
-define <16 x i16> @testv16i16u(<16 x i16> %in) {
+define <16 x i16> @testv16i16u(<16 x i16> %in) nounwind {
; AVX1-LABEL: testv16i16u:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
ret <16 x i16> %out
}
-define <32 x i8> @testv32i8(<32 x i8> %in) {
+define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
; AVX1-LABEL: testv32i8:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
ret <32 x i8> %out
}
-define <32 x i8> @testv32i8u(<32 x i8> %in) {
+define <32 x i8> @testv32i8u(<32 x i8> %in) nounwind {
; AVX1-LABEL: testv32i8u:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
ret <32 x i8> %out
}
-define <4 x i64> @foldv4i64() {
-; AVX-LABEL: foldv4i64:
-; AVX: # BB#0:
-; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,64,0]
-; AVX-NEXT: retq
+define <4 x i64> @foldv4i64() nounwind {
+; ALL-LABEL: foldv4i64:
+; ALL: # BB#0:
+; ALL-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,64,0]
+; ALL-NEXT: retq
%out = call <4 x i64> @llvm.cttz.v4i64(<4 x i64> <i64 256, i64 -1, i64 0, i64 255>, i1 0)
ret <4 x i64> %out
}
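; Constant-folded cttz for <i64 256, i64 -1, i64 0, i64 255>: cttz(256) = 8,
; cttz(-1) = 0, cttz(0) = 64 (defined, flag is i1 0), cttz(255) = 0, matching
; the [8,0,64,0] load above.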
-define <4 x i64> @foldv4i64u() {
-; AVX-LABEL: foldv4i64u:
-; AVX: # BB#0:
-; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,64,0]
-; AVX-NEXT: retq
+define <4 x i64> @foldv4i64u() nounwind {
+; ALL-LABEL: foldv4i64u:
+; ALL: # BB#0:
+; ALL-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,64,0]
+; ALL-NEXT: retq
%out = call <4 x i64> @llvm.cttz.v4i64(<4 x i64> <i64 256, i64 -1, i64 0, i64 255>, i1 -1)
ret <4 x i64> %out
}
-define <8 x i32> @foldv8i32() {
-; AVX-LABEL: foldv8i32:
-; AVX: # BB#0:
-; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,32,0,16,0,3,3]
-; AVX-NEXT: retq
+define <8 x i32> @foldv8i32() nounwind {
+; ALL-LABEL: foldv8i32:
+; ALL: # BB#0:
+; ALL-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,32,0,16,0,3,3]
+; ALL-NEXT: retq
%out = call <8 x i32> @llvm.cttz.v8i32(<8 x i32> <i32 256, i32 -1, i32 0, i32 255, i32 -65536, i32 7, i32 24, i32 88>, i1 0)
ret <8 x i32> %out
}
-define <8 x i32> @foldv8i32u() {
-; AVX-LABEL: foldv8i32u:
-; AVX: # BB#0:
-; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,32,0,16,0,3,3]
-; AVX-NEXT: retq
+define <8 x i32> @foldv8i32u() nounwind {
+; ALL-LABEL: foldv8i32u:
+; ALL: # BB#0:
+; ALL-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,32,0,16,0,3,3]
+; ALL-NEXT: retq
%out = call <8 x i32> @llvm.cttz.v8i32(<8 x i32> <i32 256, i32 -1, i32 0, i32 255, i32 -65536, i32 7, i32 24, i32 88>, i1 -1)
ret <8 x i32> %out
}
-define <16 x i16> @foldv16i16() {
-; AVX-LABEL: foldv16i16:
-; AVX: # BB#0:
-; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,16,0,16,0,3,3,1,1,0,1,2,3,4,5]
-; AVX-NEXT: retq
+define <16 x i16> @foldv16i16() nounwind {
+; ALL-LABEL: foldv16i16:
+; ALL: # BB#0:
+; ALL-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,16,0,16,0,3,3,1,1,0,1,2,3,4,5]
+; ALL-NEXT: retq
%out = call <16 x i16> @llvm.cttz.v16i16(<16 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88, i16 -2, i16 254, i16 1, i16 2, i16 4, i16 8, i16 16, i16 32>, i1 0)
ret <16 x i16> %out
}
-define <16 x i16> @foldv16i16u() {
-; AVX-LABEL: foldv16i16u:
-; AVX: # BB#0:
-; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,16,0,16,0,3,3,1,1,0,1,2,3,4,5]
-; AVX-NEXT: retq
+define <16 x i16> @foldv16i16u() nounwind {
+; ALL-LABEL: foldv16i16u:
+; ALL: # BB#0:
+; ALL-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,16,0,16,0,3,3,1,1,0,1,2,3,4,5]
+; ALL-NEXT: retq
%out = call <16 x i16> @llvm.cttz.v16i16(<16 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88, i16 -2, i16 254, i16 1, i16 2, i16 4, i16 8, i16 16, i16 32>, i1 -1)
ret <16 x i16> %out
}
-define <32 x i8> @foldv32i8() {
-; AVX-LABEL: foldv32i8:
-; AVX: # BB#0:
-; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5,6,7,8,8,7,6,5,4,3,2,1,0,0,0,0,0]
-; AVX-NEXT: retq
+define <32 x i8> @foldv32i8() nounwind {
+; ALL-LABEL: foldv32i8:
+; ALL: # BB#0:
+; ALL-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5,6,7,8,8,7,6,5,4,3,2,1,0,0,0,0,0]
+; ALL-NEXT: retq
%out = call <32 x i8> @llvm.cttz.v32i8(<32 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32, i8 64, i8 128, i8 256, i8 -256, i8 -128, i8 -64, i8 -32, i8 -16, i8 -8, i8 -4, i8 -2, i8 -1, i8 3, i8 5, i8 7, i8 127>, i1 0)
ret <32 x i8> %out
}
-define <32 x i8> @foldv32i8u() {
-; AVX-LABEL: foldv32i8u:
-; AVX: # BB#0:
-; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5,6,7,8,8,7,6,5,4,3,2,1,0,0,0,0,0]
-; AVX-NEXT: retq
+define <32 x i8> @foldv32i8u() nounwind {
+; ALL-LABEL: foldv32i8u:
+; ALL: # BB#0:
+; ALL-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5,6,7,8,8,7,6,5,4,3,2,1,0,0,0,0,0]
+; ALL-NEXT: retq
%out = call <32 x i8> @llvm.cttz.v32i8(<32 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32, i8 64, i8 128, i8 256, i8 -256, i8 -128, i8 -64, i8 -32, i8 -16, i8 -8, i8 -4, i8 -2, i8 -1, i8 3, i8 5, i8 7, i8 127>, i1 -1)
ret <32 x i8> %out
}