; Test 64-bit byteswaps from memory to registers.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s

declare i64 @llvm.bswap.i64(i64 %a)
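
; Note: LRVG is an RXY-form instruction, so its displacement is a signed
; 20-bit byte offset (-524288 to 524287). The functions below probe both
; ends of that range and the first out-of-range offset on each side.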

; Check LRVG with no displacement.
define i64 @f1(i64 *%src) {
; CHECK-LABEL: f1:
; CHECK: lrvg %r2, 0(%r2)
; CHECK: br %r14
  %a = load i64 *%src
  %swapped = call i64 @llvm.bswap.i64(i64 %a)
  ret i64 %swapped
}

; Check the high end of the aligned LRVG range.
define i64 @f2(i64 *%src) {
; CHECK-LABEL: f2:
; CHECK: lrvg %r2, 524280(%r2)
; CHECK: br %r14
  %ptr = getelementptr i64 *%src, i64 65535
  %a = load i64 *%ptr
  %swapped = call i64 @llvm.bswap.i64(i64 %a)
  ret i64 %swapped
}
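; (65535 doublewords = 524280 bytes, the largest multiple of 8 that still
; fits in the displacement field.)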

; Check the next doubleword up, which needs separate address logic.
; Other sequences besides this one would be OK.
define i64 @f3(i64 *%src) {
; CHECK-LABEL: f3:
; CHECK: agfi %r2, 524288
; CHECK: lrvg %r2, 0(%r2)
; CHECK: br %r14
  %ptr = getelementptr i64 *%src, i64 65536
  %a = load i64 *%ptr
  %swapped = call i64 @llvm.bswap.i64(i64 %a)
  ret i64 %swapped
}
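; (65536 doublewords = 524288 bytes, one past the displacement limit of
; 524287, which is why the base must first be adjusted with AGFI.)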

; Check the high end of the negative aligned LRVG range.
define i64 @f4(i64 *%src) {
; CHECK-LABEL: f4:
; CHECK: lrvg %r2, -8(%r2)
; CHECK: br %r14
  %ptr = getelementptr i64 *%src, i64 -1
  %a = load i64 *%ptr
  %swapped = call i64 @llvm.bswap.i64(i64 %a)
  ret i64 %swapped
}

; Check the low end of the LRVG range.
define i64 @f5(i64 *%src) {
; CHECK-LABEL: f5:
; CHECK: lrvg %r2, -524288(%r2)
; CHECK: br %r14
  %ptr = getelementptr i64 *%src, i64 -65536
  %a = load i64 *%ptr
  %swapped = call i64 @llvm.bswap.i64(i64 %a)
  ret i64 %swapped
}
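; (-65536 doublewords = -524288 bytes, the most negative displacement that
; can still be encoded directly.)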

; Check the next doubleword down, which needs separate address logic.
; Other sequences besides this one would be OK.
define i64 @f6(i64 *%src) {
; CHECK-LABEL: f6:
; CHECK: agfi %r2, -524296
; CHECK: lrvg %r2, 0(%r2)
; CHECK: br %r14
  %ptr = getelementptr i64 *%src, i64 -65537
  %a = load i64 *%ptr
  %swapped = call i64 @llvm.bswap.i64(i64 %a)
  ret i64 %swapped
}
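; (-65537 doublewords = -524296 bytes, one doubleword below the encodable
; range, so again the base is adjusted with AGFI first.)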

; Check that LRVG allows an index.
define i64 @f7(i64 %src, i64 %index) {
; CHECK-LABEL: f7:
; CHECK: lrvg %r2, 524287({{%r3,%r2|%r2,%r3}})
; CHECK: br %r14
  %add1 = add i64 %src, %index
  %add2 = add i64 %add1, 524287
  %ptr = inttoptr i64 %add2 to i64 *
  %a = load i64 *%ptr
  %swapped = call i64 @llvm.bswap.i64(i64 %a)
  ret i64 %swapped
}
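; (The RXY form takes a base and an index register, so both added registers
; can be folded into the address; the regex accepts either assignment of
; %r2 and %r3 to base and index.)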

; Check that volatile accesses do not use LRVG, which might access the
; storage multiple times.
define i64 @f8(i64 *%src) {
; CHECK-LABEL: f8:
; CHECK: lg [[REG:%r[0-5]]], 0(%r2)
; CHECK: lrvgr %r2, [[REG]]
; CHECK: br %r14
  %a = load volatile i64 *%src
  %swapped = call i64 @llvm.bswap.i64(i64 %a)
  ret i64 %swapped
}

; Test a case where we spill the source of at least one LRVGR. We want
; to use LRVG if possible.
define void @f9(i64 *%ptr) {
; CHECK-LABEL: f9:
; CHECK: lrvg {{%r[0-9]+}}, 160(%r15)
; CHECK: br %r14
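; Note: 160(%r15) assumes the standard s390x ELF frame layout, where spill
; slots start immediately after the 160-byte register save area.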
  %val0 = load volatile i64 *%ptr
  %val1 = load volatile i64 *%ptr
  %val2 = load volatile i64 *%ptr
  %val3 = load volatile i64 *%ptr
  %val4 = load volatile i64 *%ptr
  %val5 = load volatile i64 *%ptr
  %val6 = load volatile i64 *%ptr
  %val7 = load volatile i64 *%ptr
  %val8 = load volatile i64 *%ptr
  %val9 = load volatile i64 *%ptr
  %val10 = load volatile i64 *%ptr
  %val11 = load volatile i64 *%ptr
  %val12 = load volatile i64 *%ptr
  %val13 = load volatile i64 *%ptr
  %val14 = load volatile i64 *%ptr
  %val15 = load volatile i64 *%ptr

  %swapped0 = call i64 @llvm.bswap.i64(i64 %val0)
  %swapped1 = call i64 @llvm.bswap.i64(i64 %val1)
  %swapped2 = call i64 @llvm.bswap.i64(i64 %val2)
  %swapped3 = call i64 @llvm.bswap.i64(i64 %val3)
  %swapped4 = call i64 @llvm.bswap.i64(i64 %val4)
  %swapped5 = call i64 @llvm.bswap.i64(i64 %val5)
  %swapped6 = call i64 @llvm.bswap.i64(i64 %val6)
  %swapped7 = call i64 @llvm.bswap.i64(i64 %val7)
  %swapped8 = call i64 @llvm.bswap.i64(i64 %val8)
  %swapped9 = call i64 @llvm.bswap.i64(i64 %val9)
  %swapped10 = call i64 @llvm.bswap.i64(i64 %val10)
  %swapped11 = call i64 @llvm.bswap.i64(i64 %val11)
  %swapped12 = call i64 @llvm.bswap.i64(i64 %val12)
  %swapped13 = call i64 @llvm.bswap.i64(i64 %val13)
  %swapped14 = call i64 @llvm.bswap.i64(i64 %val14)
  %swapped15 = call i64 @llvm.bswap.i64(i64 %val15)

  store volatile i64 %val0, i64 *%ptr
  store volatile i64 %val1, i64 *%ptr
  store volatile i64 %val2, i64 *%ptr
  store volatile i64 %val3, i64 *%ptr
  store volatile i64 %val4, i64 *%ptr
  store volatile i64 %val5, i64 *%ptr
  store volatile i64 %val6, i64 *%ptr
  store volatile i64 %val7, i64 *%ptr
  store volatile i64 %val8, i64 *%ptr
  store volatile i64 %val9, i64 *%ptr
  store volatile i64 %val10, i64 *%ptr
  store volatile i64 %val11, i64 *%ptr
  store volatile i64 %val12, i64 *%ptr
  store volatile i64 %val13, i64 *%ptr
  store volatile i64 %val14, i64 *%ptr
  store volatile i64 %val15, i64 *%ptr

  store volatile i64 %swapped0, i64 *%ptr
  store volatile i64 %swapped1, i64 *%ptr
  store volatile i64 %swapped2, i64 *%ptr
  store volatile i64 %swapped3, i64 *%ptr
  store volatile i64 %swapped4, i64 *%ptr
  store volatile i64 %swapped5, i64 *%ptr
  store volatile i64 %swapped6, i64 *%ptr
  store volatile i64 %swapped7, i64 *%ptr
  store volatile i64 %swapped8, i64 *%ptr
  store volatile i64 %swapped9, i64 *%ptr
  store volatile i64 %swapped10, i64 *%ptr
  store volatile i64 %swapped11, i64 *%ptr
  store volatile i64 %swapped12, i64 *%ptr
  store volatile i64 %swapped13, i64 *%ptr
  store volatile i64 %swapped14, i64 *%ptr
  store volatile i64 %swapped15, i64 *%ptr

  ret void
}