; RUN: llc < %s -march=sparcv9 | FileCheck %s

; CHECK: ret2
; CHECK: or %g0, %i1, %i0
define i64 @ret2(i64 %a, i64 %b) {
  ret i64 %b
}

; CHECK: shl_imm
; CHECK: sllx %i0, 7, %i0
define i64 @shl_imm(i64 %a) {
  %x = shl i64 %a, 7
  ret i64 %x
}

; CHECK: sra_reg
; CHECK: srax %i0, %i1, %i0
define i64 @sra_reg(i64 %a, i64 %b) {
  %x = ashr i64 %a, %b
  ret i64 %x
}

; Immediate materialization. Many of these patterns could actually be merged
; into the restore instruction:
;
;     restore %g0, %g0, %o0
;
; CHECK: ret_imm0
; CHECK: or %g0, 0, %i0
define i64 @ret_imm0() {
  ret i64 0
}
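
; The same folding would apply to the other simm13 returns below, e.g. a
; hypothetical "restore %g0, -4096, %o0" for @ret_simm13 instead of a
; separate or-plus-restore sequence (not what the backend currently emits).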

; CHECK: ret_simm13
; CHECK: or %g0, -4096, %i0
define i64 @ret_simm13() {
  ret i64 -4096
}

; CHECK: ret_sethi
; CHECK: sethi 4, %i0
define i64 @ret_sethi() {
  ret i64 4096
}

; CHECK: ret_sethi_or
; CHECK: sethi 4, [[R:%[goli][0-7]]]
; CHECK: or [[R]], 1, %i0
define i64 @ret_sethi_or() {
  ret i64 4097
}

; CHECK: ret_nimm33
; CHECK: sethi 4, [[R:%[goli][0-7]]]
; CHECK: xor [[R]], -4, %i0
define i64 @ret_nimm33() {
  ret i64 -4100
}

; CHECK: ret_bigimm
; CHECK: sethi
; CHECK: sethi
define i64 @ret_bigimm() {
  ret i64 6800754272627607872
}

; CHECK: ret_bigimm2
; CHECK: sethi 1048576
define i64 @ret_bigimm2() {
  ret i64 4611686018427387904 ; 0x4000000000000000
}
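
; The xor-with-minus-one feeding the "and" in @reg_reg_alu below is expected
; to be folded into the single andn instruction checked for.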

; CHECK: reg_reg_alu
; CHECK: add %i0, %i1, [[R0:%[goli][0-7]]]
; CHECK: sub [[R0]], %i2, [[R1:%[goli][0-7]]]
; CHECK: andn [[R1]], %i0, %i0
define i64 @reg_reg_alu(i64 %x, i64 %y, i64 %z) {
  %a = add i64 %x, %y
  %b = sub i64 %a, %z
  %c = xor i64 %x, -1
  %d = and i64 %b, %c
  ret i64 %d
}

; CHECK: reg_imm_alu
; CHECK: add %i0, -5, [[R0:%[goli][0-7]]]
; CHECK: xor [[R0]], 2, %i0
define i64 @reg_imm_alu(i64 %x, i64 %y, i64 %z) {
  %a = add i64 %x, -5
  %b = xor i64 %a, 2
  ret i64 %b
}
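
; In @loads below, the zero-extending i32 load should select a plain ld
; (32-bit loads zero into the upper half on v9), while the sign-extending
; i32 and i16 loads should select ldsw and ldsh.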

; CHECK: loads
define i64 @loads(i64* %p, i32* %q, i32* %r, i16* %s) {
  %a = load i64* %p
  %ai = add i64 1, %a
  store i64 %ai, i64* %p
  %b = load i32* %q
  %b2 = zext i32 %b to i64
  %bi = trunc i64 %ai to i32
  store i32 %bi, i32* %q
  %c = load i32* %r
  %c2 = sext i32 %c to i64
  store i64 %ai, i64* %p
  %d = load i16* %s
  %d2 = sext i16 %d to i64
  %di = trunc i64 %ai to i16
  store i16 %di, i16* %s

  %x1 = add i64 %a, %b2
  %x2 = add i64 %c2, %d2
  %x3 = add i64 %x1, %x2
  ret i64 %x3
}

; CHECK: stores
; CHECK: ldx [%i0+8], [[R:%[goli][0-7]]]
; CHECK: stx [[R]], [%i0+16]
; CHECK: st [[R]], [%i1+-8]
; CHECK: sth [[R]], [%i2+40]
; CHECK: stb [[R]], [%i3+-20]
define void @stores(i64* %p, i32* %q, i16* %r, i8* %s) {
  %p1 = getelementptr i64* %p, i64 1
  %p2 = getelementptr i64* %p, i64 2
  %pv = load i64* %p1
  store i64 %pv, i64* %p2

  %q2 = getelementptr i32* %q, i32 -2
  %qv = trunc i64 %pv to i32
  store i32 %qv, i32* %q2

  %r2 = getelementptr i16* %r, i16 20
  %rv = trunc i64 %pv to i16
  store i16 %rv, i16* %r2

  %s2 = getelementptr i8* %s, i8 -20
  %sv = trunc i64 %pv to i8
  store i8 %sv, i8* %s2

  ret void
}
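
; Both i8 loads of %p in @promote_shifts read the same address, so they
; should be CSE'd into the single ldub checked for, and the i8 shift should
; be promoted and selected as a 32-bit sll.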

; CHECK: promote_shifts
; CHECK: ldub [%i0], [[R:%[goli][0-7]]]
; CHECK: sll [[R]], [[R]], %i0
define i8 @promote_shifts(i8* %p) {
  %L24 = load i8* %p
  %L32 = load i8* %p
  %B36 = shl i8 %L24, %L32
  ret i8 %B36
}

; CHECK: multiply
; CHECK: mulx %i0, %i1, %i0
define i64 @multiply(i64 %a, i64 %b) {
  %r = mul i64 %a, %b
  ret i64 %r
}

; CHECK: signed_divide
; CHECK: sdivx %i0, %i1, %i0
define i64 @signed_divide(i64 %a, i64 %b) {
  %r = sdiv i64 %a, %b
  ret i64 %r
}

; CHECK: unsigned_divide
; CHECK: udivx %i0, %i1, %i0
define i64 @unsigned_divide(i64 %a, i64 %b) {
  %r = udiv i64 %a, %b
  ret i64 %r
}

define void @access_fi() {
entry:
  %b = alloca [32 x i8], align 1
  %arraydecay = getelementptr inbounds [32 x i8]* %b, i64 0, i64 0
  call void @g(i8* %arraydecay)
  ret void
}

declare void @g(i8*)