; RUN: llc < %s -mattr=-bmi -mtriple=x86_64-linux | FileCheck %s -check-prefix=X86-64
; RUN: llc < %s -mattr=-bmi -mtriple=x86_64-win32 | FileCheck %s -check-prefix=WIN64
; RUN: llc < %s -mattr=-bmi -march=x86 | FileCheck %s -check-prefix=X86-32
; Use h registers. On x86-64, codegen doesn't support general allocation
; of h registers yet, due to x86 encoding complications.
; bar64: stores byte 1 of an inreg i64 through %p. The CHECK lines expect a
; 64-bit shift (shrq $8) on both SysV (%rdi) and Win64 (%rcx) rather than an
; h-register read, matching the file header's note that x86-64 codegen does
; not generally allocate h registers.
; NOTE(review): this block is truncated by extraction — the embedded original
; line numbers jump (8, 10, 13, 14, 16, 19, 22), and the lshr, store, ret,
; and closing brace are missing. Restore from upstream before using; code
; lines below are preserved byte-for-byte, including the fused numbers.
8 define void @bar64(i64 inreg %x, i8* inreg %p) nounwind {
10 ; X86-64: shrq $8, %rdi
13 ; See FIXME: on regclass GR8.
14 ; It could be optimally transformed like; incb %ch; movb %ch, (%rdx)
16 ; WIN64: shrq $8, %rcx
19 ; X86-32-LABEL: bar64:
22 %t1 = trunc i64 %t0 to i8
; bar32: i32 variant of the same pattern — byte 1 of the inreg i32 is stored
; through %p; CHECKs expect shrl $8 on %edi (SysV) / %ecx (Win64).
; NOTE(review): truncated by extraction — embedded original line numbers jump
; (28, 29, 30, 34, 37, 40); the lshr, store, ret, and closing brace are
; missing. Code lines preserved byte-for-byte, including fused numbers.
28 define void @bar32(i32 inreg %x, i8* inreg %p) nounwind {
29 ; X86-64-LABEL: bar32:
30 ; X86-64: shrl $8, %edi
34 ; WIN64: shrl $8, %ecx
37 ; X86-32-LABEL: bar32:
40 %t1 = trunc i32 %t0 to i8
; bar16: i16 variant — byte 1 of the inreg i16 stored through %p; CHECKs
; expect a 32-bit shift (shrl $8) on both x86-64 conventions, avoiding
; 16-bit operand-size encodings.
; NOTE(review): truncated by extraction — embedded original line numbers jump
; (46, 47, 48, 52, 55, 58); the lshr, store, ret, and closing brace are
; missing. Code lines preserved byte-for-byte, including fused numbers.
46 define void @bar16(i16 inreg %x, i8* inreg %p) nounwind {
47 ; X86-64-LABEL: bar16:
48 ; X86-64: shrl $8, %edi
52 ; WIN64: shrl $8, %ecx
55 ; X86-32-LABEL: bar16:
58 %t1 = trunc i16 %t0 to i8
; qux64: returns (x >> 8) & 255, i.e. byte 1 of the inreg i64. Here the
; CHECKs DO expect an h-register read (movzbl %ah / %ch) — movzbl from the
; high-byte register of the incoming argument register is legal since %ah/%ch
; need no REX prefix in these sequences.
; NOTE(review): truncated by extraction — embedded original line numbers jump
; (64..67, 70, 72, 73, 75); the lshr, ret, and closing brace are missing.
; Code lines preserved byte-for-byte, including fused numbers.
64 define i64 @qux64(i64 inreg %x) nounwind {
65 ; X86-64-LABEL: qux64:
66 ; X86-64: movq %rdi, %rax
67 ; X86-64: movzbl %ah, %eax
70 ; WIN64: movzbl %ch, %eax
72 ; X86-32-LABEL: qux64:
73 ; X86-32: movzbl %ah, %eax
75 %t1 = and i64 %t0, 255
; qux32: i32 variant of qux64 — returns (x >> 8) & 255; CHECKs expect the
; argument copied to %eax then movzbl from %ah (%ch on Win64).
; NOTE(review): truncated by extraction — embedded original line numbers jump
; (79..82, 85, 87, 88, 90); the lshr, ret, and closing brace are missing.
; Code lines preserved byte-for-byte, including fused numbers.
79 define i32 @qux32(i32 inreg %x) nounwind {
80 ; X86-64-LABEL: qux32:
81 ; X86-64: movl %edi, %eax
82 ; X86-64: movzbl %ah, %eax
85 ; WIN64: movzbl %ch, %eax
87 ; X86-32-LABEL: qux32:
88 ; X86-32: movzbl %ah, %eax
90 %t1 = and i32 %t0, 255
94 define i16 @qux16(i16 inreg %x) nounwind {
95 ; X86-64-LABEL: qux16:
96 ; X86-64: movl %edi, %eax
97 ; X86-64: movzbl %ah, %eax
100 ; WIN64: movzbl %ch, %eax
102 ; X86-32-LABEL: qux16:
103 ; X86-32: movzbl %ah, %eax