; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s
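
; Exercises ARM inline-asm operand modifiers (:y, :B, :L, :m, :M, :H).

; The :y modifier should print a VFP single-precision register as an indexed
; double (e.g. s0 -> d0[0]), so the scalar float inputs can feed the NEON
; vmul.f32 by-scalar forms below.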
define i32 @foo(float %scale, float %scale2) nounwind {
  %scale.addr = alloca float, align 4
  %scale2.addr = alloca float, align 4
  store float %scale, float* %scale.addr, align 4
  store float %scale2, float* %scale2.addr, align 4
  %tmp = load float* %scale.addr, align 4
  %tmp1 = load float* %scale2.addr, align 4
  call void asm sideeffect "vmul.f32 q0, q0, ${0:y} \0A\09vmul.f32 q1, q1, ${0:y} \0A\09vmul.f32 q1, q0, ${1:y} \0A\09", "w,w,~{q0},~{q1}"(float %tmp, float %tmp1) nounwind
  ret i32 0
}
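
; The :B modifier should print the bitwise inverse of the immediate operand.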
define void @f0() nounwind {
  call void asm sideeffect ".word ${0:B} \0A\09", "i"(i32 0) nounwind
  ret void
}
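
; The :L modifier should print the low 16 bits of the immediate operand.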
define void @f1() nounwind {
  call void asm sideeffect ".word ${0:L} \0A\09", "i"(i32 -1) nounwind
  ret void
}

@f2_ptr = internal global i32* @f2_var, align 4
@f2_var = external global i32
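
; The :m modifier should print only the base register of a memory operand
; (no brackets), so the asm string supplies its own.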
define void @f2() nounwind {
; CHECK: ldr r0, [r{{[0-9]+}}]
  call void asm sideeffect "ldr r0, [${0:m}]\0A\09", "*m,~{r0}"(i32** @f2_ptr) nounwind
  ret void
}

@f3_ptr = internal global i64* @f3_var, align 4
@f3_var = external global i64
@f3_var2 = external global i64
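
; The :M modifier should print a register pair holding a 64-bit value as an
; ldm/stm register list; :m again prints just the base register of the address.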
define void @f3() nounwind {
; CHECK: stm {{lr|r[0-9]+}}, {[[REG1:(r[0-9]+)]], r{{[0-9]+}}}
; CHECK: adds {{lr|r[0-9]+}}, [[REG1]]
; CHECK: ldm {{lr|r[0-9]+}}, {r{{[0-9]+}}, r{{[0-9]+}}}
  %tmp = load i64* @f3_var, align 4
  %tmp1 = load i64* @f3_var2, align 4
  %0 = call i64 asm sideeffect "stm ${0:m}, ${1:M}\0A\09adds $3, $1\0A\09", "=*m,=r,1,r"(i64** @f3_ptr, i64 %tmp, i64 %tmp1) nounwind
  store i64 %0, i64* @f3_var, align 4
  %1 = call i64 asm sideeffect "ldm ${1:m}, ${0:M}\0A\09", "=r,*m"(i64** @f3_ptr) nounwind
  store i64 %1, i64* @f3_var, align 4
  ret void
}
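
; The :H modifier should print the most-significant register of the 64-bit
; pair, giving ldrexd the consecutive even/odd register pair it requires.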
define i64 @f4(i64* %val) nounwind {
; CHECK: ldrexd [[REG1:(r[0-9]?[02468])]], {{r[0-9]?[13579]}}, [r{{[0-9]+}}]
  %0 = tail call i64 asm sideeffect "ldrexd $0, ${0:H}, [$1]", "=&r,r,*Qo"(i64* %val, i64* %val) nounwind
  ret i64 %0
}
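
; Only the immediate ("i") operand is referenced in the asm string; the unused
; "r" operand should still be accepted and the call lowered without error.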
define void @f5(i64 %__pu_val) {
  call void asm sideeffect "$1", "r,i"(i64 %__pu_val, i32 -14)
  ret void
}