1 ; RUN: llc -march=arm64 < %s | FileCheck %s
4 @object = external hidden global i64, section "__DATA, __objc_ivar", align 8
8 ; CHECK: ldr xzr, [x{{[0-9]+}}, #8]
11 %incdec.ptr = getelementptr inbounds i64, i64* @object, i64 1
12 %tmp = load volatile i64, i64* %incdec.ptr, align 8
16 ; base + offset (offset magnitude exceeds the signed 9-bit immediate range, so the address must be materialized with a sub)
18 ; CHECK: sub [[ADDREG:x[0-9]+]], x{{[0-9]+}}, #264
23 %incdec.ptr = getelementptr inbounds i64, i64* @object, i64 -33
24 %tmp = load volatile i64, i64* %incdec.ptr, align 8
28 ; base + unsigned offset (> imm9 and <= imm12 * size of type in bytes)
30 ; CHECK: ldr xzr, [x{{[0-9]+}}, #32760]
33 %incdec.ptr = getelementptr inbounds i64, i64* @object, i64 4095
34 %tmp = load volatile i64, i64* %incdec.ptr, align 8
38 ; base + unsigned offset (> imm12 * size of type in bytes)
40 ; CHECK: orr w[[NUM:[0-9]+]], wzr, #0x8000
41 ; CHECK: ldr xzr, [x{{[0-9]+}}, x[[NUM]]]
44 %incdec.ptr = getelementptr inbounds i64, i64* @object, i64 4096
45 %tmp = load volatile i64, i64* %incdec.ptr, align 8
51 ; CHECK: ldr xzr, [x{{[0-9]+}}, x{{[0-9]+}}, lsl #3]
53 define void @t5(i64 %a) {
54 %incdec.ptr = getelementptr inbounds i64, i64* @object, i64 %a
55 %tmp = load volatile i64, i64* %incdec.ptr, align 8
61 ; CHECK: add [[ADDREG:x[0-9]+]], x{{[0-9]+}}, x{{[0-9]+}}, lsl #3
62 ; CHECK-NEXT: orr w[[NUM:[0-9]+]], wzr, #0x8000
63 ; CHECK: ldr xzr, [x{{[0-9]+}}, x[[NUM]]]
65 define void @t6(i64 %a) {
66 %tmp1 = getelementptr inbounds i64, i64* @object, i64 %a
67 %incdec.ptr = getelementptr inbounds i64, i64* %tmp1, i64 4096
68 %tmp = load volatile i64, i64* %incdec.ptr, align 8
72 ; Test base + wide immediate
73 define void @t7(i64 %a) {
75 ; CHECK: orr w[[NUM:[0-9]+]], wzr, #0xffff
76 ; CHECK-NEXT: ldr xzr, [x0, x[[NUM]]]
77 %1 = add i64 %a, 65535 ;0xffff
78 %2 = inttoptr i64 %1 to i64*
79 %3 = load volatile i64, i64* %2, align 8
83 define void @t8(i64 %a) {
85 ; CHECK: movn [[REG:x[0-9]+]], #0x1235
86 ; CHECK-NEXT: ldr xzr, [x0, [[REG]]]
87 %1 = sub i64 %a, 4662 ;-4662 is 0xffffffffffffedca
88 %2 = inttoptr i64 %1 to i64*
89 %3 = load volatile i64, i64* %2, align 8
93 define void @t9(i64 %a) {
95 ; CHECK: movn [[REG:x[0-9]+]], #0x1235, lsl #16
96 ; CHECK-NEXT: ldr xzr, [x0, [[REG]]]
97 %1 = add i64 -305463297, %a ;-305463297 is 0xffffffffedcaffff
98 %2 = inttoptr i64 %1 to i64*
99 %3 = load volatile i64, i64* %2, align 8
103 define void @t10(i64 %a) {
105 ; CHECK: movz [[REG:x[0-9]+]], #0x123, lsl #48
106 ; CHECK-NEXT: ldr xzr, [x0, [[REG]]]
107 %1 = add i64 %a, 81909218222800896 ;0x123000000000000
108 %2 = inttoptr i64 %1 to i64*
109 %3 = load volatile i64, i64* %2, align 8
113 define void @t11(i64 %a) {
115 ; CHECK: movz w[[NUM:[0-9]+]], #0x123, lsl #16
116 ; CHECK: movk w[[NUM:[0-9]+]], #0x4567
117 ; CHECK-NEXT: ldr xzr, [x0, x[[NUM]]]
118 %1 = add i64 %a, 19088743 ;0x1234567
119 %2 = inttoptr i64 %1 to i64*
120 %3 = load volatile i64, i64* %2, align 8
124 ; Test some boundaries that should not use movz/movn/orr
125 define void @t12(i64 %a) {
127 ; CHECK: add [[REG:x[0-9]+]], x0, #4095
128 ; CHECK-NEXT: ldr xzr, {{\[}}[[REG]]]
129 %1 = add i64 %a, 4095 ;0xfff
130 %2 = inttoptr i64 %1 to i64*
131 %3 = load volatile i64, i64* %2, align 8
135 define void @t13(i64 %a) {
137 ; CHECK: sub [[REG:x[0-9]+]], x0, #4095
138 ; CHECK-NEXT: ldr xzr, {{\[}}[[REG]]]
139 %1 = add i64 %a, -4095 ;-0xfff
140 %2 = inttoptr i64 %1 to i64*
141 %3 = load volatile i64, i64* %2, align 8
145 define void @t14(i64 %a) {
147 ; CHECK: add [[REG:x[0-9]+]], x0, #291, lsl #12
148 ; CHECK-NEXT: ldr xzr, {{\[}}[[REG]]]
149 %1 = add i64 %a, 1191936 ;0x123000
150 %2 = inttoptr i64 %1 to i64*
151 %3 = load volatile i64, i64* %2, align 8
155 define void @t15(i64 %a) {
157 ; CHECK: sub [[REG:x[0-9]+]], x0, #291, lsl #12
158 ; CHECK-NEXT: ldr xzr, {{\[}}[[REG]]]
159 %1 = add i64 %a, -1191936 ;0xFFFFFFFFFFEDD000
160 %2 = inttoptr i64 %1 to i64*
161 %3 = load volatile i64, i64* %2, align 8
165 define void @t16(i64 %a) {
167 ; CHECK: ldr xzr, [x0, #28672]
168 %1 = add i64 %a, 28672 ;0x7000
169 %2 = inttoptr i64 %1 to i64*
170 %3 = load volatile i64, i64* %2, align 8
174 define void @t17(i64 %a) {
176 ; CHECK: ldur xzr, [x0, #-256]
177 %1 = add i64 %a, -256 ;-0x100
178 %2 = inttoptr i64 %1 to i64*
179 %3 = load volatile i64, i64* %2, align 8