; RUN: llc -asm-verbose=false < %s | FileCheck %s -check-prefix=X64
; RUN: llc -march=x86 -asm-verbose=false < %s | FileCheck %s -check-prefix=X32
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-apple-darwin10.2"
; Replacing the low byte of an i32 in memory should narrow to a single
; one-byte store.
define void @test1(i32* nocapture %a0, i8 zeroext %a1) nounwind ssp {
entry:
  %A = load i32* %a0, align 4
  %B = and i32 %A, -256           ; 0xFFFFFF00: clear the low byte
  %C = zext i8 %a1 to i32
  %D = or i32 %B, %C              ; insert the new low byte
  store i32 %D, i32* %a0, align 4
  ret void

; X64: test1:
; X64: movb %sil, (%rdi)

; X32: test1:
; X32: movb 8(%esp), %al
; X32: movb %al, (%{{.*}})
}
; Replacing byte 1 of an i32 in memory should narrow to a one-byte store
; at offset 1.
define void @test2(i32* nocapture %a0, i8 zeroext %a1) nounwind ssp {
entry:
  %A = load i32* %a0, align 4
  %B = and i32 %A, -65281         ; 0xFFFF00FF: clear byte 1
  %C = zext i8 %a1 to i32
  %CS = shl i32 %C, 8             ; move the new byte into position 1
  %D = or i32 %B, %CS
  store i32 %D, i32* %a0, align 4
  ret void

; X64: test2:
; X64: movb %sil, 1(%rdi)

; X32: test2:
; X32: movb 8(%esp), %al
; X32: movb %al, 1(%{{.*}})
}
; Replacing the low 16 bits of an i32 in memory should narrow to a
; two-byte store.
define void @test3(i32* nocapture %a0, i16 zeroext %a1) nounwind ssp {
entry:
  %A = load i32* %a0, align 4
  %B = and i32 %A, -65536         ; 0xFFFF0000: clear the low halfword
  %C = zext i16 %a1 to i32
  %D = or i32 %B, %C              ; insert the new low halfword
  store i32 %D, i32* %a0, align 4
  ret void

; X64: test3:
; X64: movw %si, (%rdi)

; X32: test3:
; X32: movw 8(%esp), %ax
; X32: movw %ax, (%{{.*}})
}
; Replacing the high 16 bits of an i32 in memory should narrow to a
; two-byte store at offset 2.
define void @test4(i32* nocapture %a0, i16 zeroext %a1) nounwind ssp {
entry:
  %A = load i32* %a0, align 4
  %B = and i32 %A, 65535          ; 0x0000FFFF: clear the high halfword
  %C = zext i16 %a1 to i32
  %CS = shl i32 %C, 16            ; move the new halfword into the high position
  %D = or i32 %B, %CS
  store i32 %D, i32* %a0, align 4
  ret void

; X64: test4:
; X64: movw %si, 2(%rdi)

; X32: test4:
; X32: movl 8(%esp), %eax
; X32: movw %ax, 2(%{{.*}})
}
; Replacing bits 16-31 of an i64 in memory should narrow to a two-byte
; store at offset 2.
define void @test5(i64* nocapture %a0, i16 zeroext %a1) nounwind ssp {
entry:
  %A = load i64* %a0, align 4
  %B = and i64 %A, -4294901761    ; 0xFFFFFFFF0000FFFF: clear bits 16-31
  %C = zext i16 %a1 to i64
  %CS = shl i64 %C, 16            ; move the new halfword into bits 16-31
  %D = or i64 %B, %CS
  store i64 %D, i64* %a0, align 4
  ret void

; X64: test5:
; X64: movw %si, 2(%rdi)

; X32: test5:
; X32: movzwl 8(%esp), %eax
; X32: movw %ax, 2(%{{.*}})
}
; Replacing byte 5 of an i64 in memory should narrow to a one-byte store
; at offset 5.
define void @test6(i64* nocapture %a0, i8 zeroext %a1) nounwind ssp {
entry:
  %A = load i64* %a0, align 4
  %B = and i64 %A, -280375465082881 ; 0xFFFF00FFFFFFFFFF: clear byte 5
  %C = zext i8 %a1 to i64
  %CS = shl i64 %C, 40              ; move the new byte into byte position 5
  %D = or i64 %B, %CS
  store i64 %D, i64* %a0, align 4
  ret void

; X64: test6:
; X64: movb %sil, 5(%rdi)

; X32: test6:
; X32: movb 8(%esp), %al
; X32: movb %al, 5(%{{.*}})
}
; Same pattern as test6, but with an extra unrelated load: store narrowing
; must still fire when the chain has other users.
define i32 @test7(i64* nocapture %a0, i8 zeroext %a1, i32* %P2) nounwind {
entry:
  %OtherLoad = load i32* %P2
  %A = load i64* %a0, align 4
  %B = and i64 %A, -280375465082881 ; 0xFFFF00FFFFFFFFFF: clear byte 5
  %C = zext i8 %a1 to i64
  %CS = shl i64 %C, 40              ; move the new byte into byte position 5
  %D = or i64 %B, %CS
  store i64 %D, i64* %a0, align 4
  ret i32 %OtherLoad

; X64: test7:
; X64: movb %sil, 5(%rdi)

; X32: test7:
; X32: movb 8(%esp), %cl
; X32: movb %cl, 5(%{{.*}})
}
@g_16 = internal global i32 -1

; The intervening "store 0" below means the load/or/store of @g_16 cannot
; be folded into a single read-modify-write "or" to memory; all four
; instructions must be emitted in order.
; X64: test8:
; X64-NEXT: movl _g_16(%rip), %eax
; X64-NEXT: movl $0, _g_16(%rip)
; X64-NEXT: orl $1, %eax
; X64-NEXT: movl %eax, _g_16(%rip)
define void @test8() nounwind {
  %tmp = load i32* @g_16
  store i32 0, i32* @g_16          ; clobbers @g_16 between the load and the or'd store
  %or = or i32 %tmp, 1
  store i32 %or, i32* @g_16
  ret void
}
; With no intervening store, the load/or/store folds into a single
; byte-sized RMW "or" to memory.
; X64: test9:
; X64-NEXT: orb $1, _g_16(%rip)
define void @test9() nounwind {
  %tmp = load i32* @g_16
  %or = or i32 %tmp, 1
  store i32 %or, i32* @g_16
  ret void
}
; rdar://8494845 + PR8244
; The sext load plus shift-right-by-8 must not be narrowed into a plain
; byte load of P[1]: bit 8 and up come from the sign extension.
; X64: test10:
; X64-NEXT: movsbl (%rdi), %eax
; X64-NEXT: shrl $8, %eax
161 define i8 @test10(i8* %P) nounwind ssp {
163 %tmp = load i8* %P, align 1
164 %conv = sext i8 %tmp to i32
165 %shr3 = lshr i32 %conv, 8
166 %conv2 = trunc i32 %shr3 to i8