; RUN: llc < %s -march=x86-64 -mattr=+mmx,+sse2 | FileCheck %s
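
; The load of the variable shift count should be folded into the
; memory-operand form of the MMX shift instruction
; (e.g. psllq (%rsi), %mm0).
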
define i64 @t0(<1 x i64>* %a, i32* %b) {
; CHECK-LABEL: t0:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: movq (%rdi), %mm0
; CHECK-NEXT: psllq (%rsi), %mm0
; CHECK-NEXT: movd %mm0, %rax
; CHECK-NEXT: retq
entry:
  %0 = bitcast <1 x i64>* %a to x86_mmx*
  %1 = load x86_mmx* %0, align 8
  %2 = load i32* %b, align 4
  %3 = tail call x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx %1, i32 %2)
  %4 = bitcast x86_mmx %3 to i64
  ret i64 %4
}
declare x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx, i32)

define i64 @t1(<1 x i64>* %a, i32* %b) {
; CHECK-LABEL: t1:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: movq (%rdi), %mm0
; CHECK-NEXT: psrlq (%rsi), %mm0
; CHECK-NEXT: movd %mm0, %rax
; CHECK-NEXT: retq
entry:
  %0 = bitcast <1 x i64>* %a to x86_mmx*
  %1 = load x86_mmx* %0, align 8
  %2 = load i32* %b, align 4
  %3 = tail call x86_mmx @llvm.x86.mmx.psrli.q(x86_mmx %1, i32 %2)
  %4 = bitcast x86_mmx %3 to i64
  ret i64 %4
}
declare x86_mmx @llvm.x86.mmx.psrli.q(x86_mmx, i32)

define i64 @t2(<1 x i64>* %a, i32* %b) {
; CHECK-LABEL: t2:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: movq (%rdi), %mm0
; CHECK-NEXT: psllw (%rsi), %mm0
; CHECK-NEXT: movd %mm0, %rax
; CHECK-NEXT: retq
entry:
  %0 = bitcast <1 x i64>* %a to x86_mmx*
  %1 = load x86_mmx* %0, align 8
  %2 = load i32* %b, align 4
  %3 = tail call x86_mmx @llvm.x86.mmx.pslli.w(x86_mmx %1, i32 %2)
  %4 = bitcast x86_mmx %3 to i64
  ret i64 %4
}
declare x86_mmx @llvm.x86.mmx.pslli.w(x86_mmx, i32)

define i64 @t3(<1 x i64>* %a, i32* %b) {
; CHECK-LABEL: t3:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: movq (%rdi), %mm0
; CHECK-NEXT: psrlw (%rsi), %mm0
; CHECK-NEXT: movd %mm0, %rax
; CHECK-NEXT: retq
entry:
  %0 = bitcast <1 x i64>* %a to x86_mmx*
  %1 = load x86_mmx* %0, align 8
  %2 = load i32* %b, align 4
  %3 = tail call x86_mmx @llvm.x86.mmx.psrli.w(x86_mmx %1, i32 %2)
  %4 = bitcast x86_mmx %3 to i64
  ret i64 %4
}
declare x86_mmx @llvm.x86.mmx.psrli.w(x86_mmx, i32)

define i64 @t4(<1 x i64>* %a, i32* %b) {
; CHECK-LABEL: t4:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: movq (%rdi), %mm0
; CHECK-NEXT: pslld (%rsi), %mm0
; CHECK-NEXT: movd %mm0, %rax
; CHECK-NEXT: retq
entry:
  %0 = bitcast <1 x i64>* %a to x86_mmx*
  %1 = load x86_mmx* %0, align 8
  %2 = load i32* %b, align 4
  %3 = tail call x86_mmx @llvm.x86.mmx.pslli.d(x86_mmx %1, i32 %2)
  %4 = bitcast x86_mmx %3 to i64
  ret i64 %4
}
declare x86_mmx @llvm.x86.mmx.pslli.d(x86_mmx, i32)

define i64 @t5(<1 x i64>* %a, i32* %b) {
; CHECK-LABEL: t5:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: movq (%rdi), %mm0
; CHECK-NEXT: psrld (%rsi), %mm0
; CHECK-NEXT: movd %mm0, %rax
; CHECK-NEXT: retq
entry:
  %0 = bitcast <1 x i64>* %a to x86_mmx*
  %1 = load x86_mmx* %0, align 8
  %2 = load i32* %b, align 4
  %3 = tail call x86_mmx @llvm.x86.mmx.psrli.d(x86_mmx %1, i32 %2)
  %4 = bitcast x86_mmx %3 to i64
  ret i64 %4
}
declare x86_mmx @llvm.x86.mmx.psrli.d(x86_mmx, i32)

define i64 @t6(<1 x i64>* %a, i32* %b) {
; CHECK-LABEL: t6:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: movq (%rdi), %mm0
; CHECK-NEXT: psraw (%rsi), %mm0
; CHECK-NEXT: movd %mm0, %rax
; CHECK-NEXT: retq
entry:
  %0 = bitcast <1 x i64>* %a to x86_mmx*
  %1 = load x86_mmx* %0, align 8
  %2 = load i32* %b, align 4
  %3 = tail call x86_mmx @llvm.x86.mmx.psrai.w(x86_mmx %1, i32 %2)
  %4 = bitcast x86_mmx %3 to i64
  ret i64 %4
}
declare x86_mmx @llvm.x86.mmx.psrai.w(x86_mmx, i32)

define i64 @t7(<1 x i64>* %a, i32* %b) {
; CHECK-LABEL: t7:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: movq (%rdi), %mm0
; CHECK-NEXT: psrad (%rsi), %mm0
; CHECK-NEXT: movd %mm0, %rax
; CHECK-NEXT: retq
entry:
  %0 = bitcast <1 x i64>* %a to x86_mmx*
  %1 = load x86_mmx* %0, align 8
  %2 = load i32* %b, align 4
  %3 = tail call x86_mmx @llvm.x86.mmx.psrai.d(x86_mmx %1, i32 %2)
  %4 = bitcast x86_mmx %3 to i64
  ret i64 %4
}
declare x86_mmx @llvm.x86.mmx.psrai.d(x86_mmx, i32)