; RUN: llc -mtriple=armv7-none-linux-gnueabi < %s | FileCheck %s
3 define void @foo(i64* %addr) {
4 %val1 = tail call i64 asm sideeffect "ldrd $0, ${0:H}, [r0]", "=&r,r"(i64* %addr)
5 %val2 = tail call i64 asm sideeffect "ldrd $0, ${0:H}, [r0]", "=&r,r"(i64* %addr)
6 %val3 = tail call i64 asm sideeffect "ldrd $0, ${0:H}, [r0]", "=&r,r"(i64* %addr)
7 %val4 = tail call i64 asm sideeffect "ldrd $0, ${0:H}, [r0]", "=&r,r"(i64* %addr)
8 %val5 = tail call i64 asm sideeffect "ldrd $0, ${0:H}, [r0]", "=&r,r"(i64* %addr)
9 %val6 = tail call i64 asm sideeffect "ldrd $0, ${0:H}, [r0]", "=&r,r"(i64* %addr)
10 %val7 = tail call i64 asm sideeffect "ldrd $0, ${0:H}, [r0]", "=&r,r"(i64* %addr)
; Key point is that enough 64-bit paired GPR values are live that
; one of them has to be spilled. This used to cause an abort because
; an LDMIA was created with both a FrameIndex and an offset, which
; is not allowed.
; We also want to ensure the register scavenger is working (i.e. an
; offset from sp can be generated), so we need two spills.
; CHECK: add [[ADDRREG:[a-z0-9]+]], sp, #{{[0-9]+}}
; CHECK: stm [[ADDRREG]], {r{{[0-9]+}}, r{{[0-9]+}}}
; CHECK: stm sp, {r{{[0-9]+}}, r{{[0-9]+}}}
; In principle LLVM may have to recalculate the offset. At the moment
; it reuses the original though.
; CHECK: ldm [[ADDRREG]], {r{{[0-9]+}}, r{{[0-9]+}}}
; CHECK: ldm sp, {r{{[0-9]+}}, r{{[0-9]+}}}
28 store volatile i64 %val1, i64* %addr
29 store volatile i64 %val2, i64* %addr
30 store volatile i64 %val3, i64* %addr
31 store volatile i64 %val4, i64* %addr
32 store volatile i64 %val5, i64* %addr
33 store volatile i64 %val6, i64* %addr
34 store volatile i64 %val7, i64* %addr