; Test add with non-legal types
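; (i8 and i16 are not legal register types on PPC64, so these operations have to be promoted to a wider type first)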
-define void @add_i8(i8 %a, i8 %b) nounwind ssp {
+define void @add_i8(i8 %a, i8 %b) nounwind {
entry:
; ELF64: add_i8
%a.addr = alloca i8, align 4
ret void
}
-define void @add_i8_imm(i8 %a) nounwind ssp {
+define void @add_i8_imm(i8 %a) nounwind {
entry:
; ELF64: add_i8_imm
%a.addr = alloca i8, align 4
ret void
}
-define void @add_i16(i16 %a, i16 %b) nounwind ssp {
+define void @add_i16(i16 %a, i16 %b) nounwind {
entry:
; ELF64: add_i16
%a.addr = alloca i16, align 4
ret void
}
-define void @add_i16_imm(i16 %a, i16 %b) nounwind ssp {
+define void @add_i16_imm(i16 %a, i16 %b) nounwind {
entry:
; ELF64: add_i16_imm
%a.addr = alloca i16, align 4
; Test or with non-legal types
-define void @or_i8(i8 %a, i8 %b) nounwind ssp {
+define void @or_i8(i8 %a, i8 %b) nounwind {
entry:
; ELF64: or_i8
%a.addr = alloca i8, align 4
ret void
}
-define void @or_i8_imm(i8 %a) nounwind ssp {
+define void @or_i8_imm(i8 %a) nounwind {
entry:
; ELF64: or_i8_imm
%a.addr = alloca i8, align 4
ret void
}
-define void @or_i16(i16 %a, i16 %b) nounwind ssp {
+define void @or_i16(i16 %a, i16 %b) nounwind {
entry:
; ELF64: or_i16
%a.addr = alloca i16, align 4
ret void
}
-define void @or_i16_imm(i16 %a) nounwind ssp {
+define void @or_i16_imm(i16 %a) nounwind {
entry:
; ELF64: or_i16_imm
%a.addr = alloca i16, align 4
; Test sub with non-legal types
-define void @sub_i8(i8 %a, i8 %b) nounwind ssp {
+define void @sub_i8(i8 %a, i8 %b) nounwind {
entry:
; ELF64: sub_i8
%a.addr = alloca i8, align 4
ret void
}
-define void @sub_i8_imm(i8 %a) nounwind ssp {
+define void @sub_i8_imm(i8 %a) nounwind {
entry:
; ELF64: sub_i8_imm
%a.addr = alloca i8, align 4
ret void
}
-define void @sub_i16(i16 %a, i16 %b) nounwind ssp {
+define void @sub_i16(i16 %a, i16 %b) nounwind {
entry:
; ELF64: sub_i16
%a.addr = alloca i16, align 4
ret void
}
-define void @sub_i16_imm(i16 %a) nounwind ssp {
+define void @sub_i16_imm(i16 %a) nounwind {
entry:
; ELF64: sub_i16_imm
%a.addr = alloca i16, align 4
ret void
}
-define void @sub_i16_badimm(i16 %a) nounwind ssp {
+define void @sub_i16_badimm(i16 %a) nounwind {
entry:
; ELF64: sub_i16_badimm
%a.addr = alloca i16, align 4
; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort=1 -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s --check-prefix=ELF64
-define i32 @t1(i32 %a, i32 %b) nounwind uwtable ssp {
+define i32 @t1(i32 %a, i32 %b) nounwind {
entry:
; ELF64: t1
%x = add i32 %a, %b
declare signext i8 @t7();
declare zeroext i8 @t8();
-define i32 @t10(i32 %argc, i8** nocapture %argv) {
+define i32 @t10(i32 %argc, i8** nocapture %argv) nounwind {
entry:
; ELF64: t10
%call = call i32 @bar(i8 zeroext 0, i8 zeroext -8, i8 zeroext -69, i8 zeroext 28, i8 zeroext 40, i8 zeroext -70)
; ret i32 %tmp1
;}
-declare void @float_foo(float %f) ssp
+declare void @float_foo(float %f)
-define void @float_const() ssp {
+define void @float_const() nounwind {
entry:
; ELF64: float_const
call void @float_foo(float 0x401C666660000000)
ret void
}
-define void @float_reg(float %dummy, float %f) ssp {
+define void @float_reg(float %dummy, float %f) nounwind {
entry:
; ELF64: float_reg
call void @float_foo(float %f)
ret void
}
-declare void @double_foo(double %d) ssp
+declare void @double_foo(double %d)
-define void @double_const() ssp {
+define void @double_const() nounwind {
entry:
; ELF64: double_const
call void @double_foo(double 0x1397723CCABD0000)
ret void
}
-define void @double_reg(double %dummy, double %d) ssp {
+define void @double_reg(double %dummy, double %d) nounwind {
entry:
; ELF64: double_reg
call void @double_foo(double %d)
; When fast-isel better supports VSX, fix up this test case.
;
; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort=1 -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -mattr=-vsx | FileCheck %s --check-prefix=ELF64
-define void @t1a(float %a) uwtable ssp {
+define void @t1a(float %a) nounwind {
entry:
; ELF64: t1a
%cmp = fcmp oeq float %a, 0.000000e+00
declare void @foo()
-define void @t1b(float %a) uwtable ssp {
+define void @t1b(float %a) nounwind {
entry:
; ELF64: t1b
%cmp = fcmp oeq float %a, -0.000000e+00
ret void
}
-define void @t2a(double %a) uwtable ssp {
+define void @t2a(double %a) nounwind {
entry:
; ELF64: t2a
%cmp = fcmp oeq double %a, 0.000000e+00
ret void
}
-define void @t2b(double %a) uwtable ssp {
+define void @t2b(double %a) nounwind {
entry:
; ELF64: t2b
%cmp = fcmp oeq double %a, -0.000000e+00
ret void
}
-define void @t4(i8 signext %a) uwtable ssp {
+define void @t4(i8 signext %a) nounwind {
entry:
; ELF64: t4
%cmp = icmp eq i8 %a, -1
ret void
}
-define void @t5(i8 zeroext %a) uwtable ssp {
+define void @t5(i8 zeroext %a) nounwind {
entry:
; ELF64: t5
%cmp = icmp eq i8 %a, 1
ret void
}
-define void @t6(i16 signext %a) uwtable ssp {
+define void @t6(i16 signext %a) nounwind {
entry:
; ELF64: t6
%cmp = icmp eq i16 %a, -1
ret void
}
-define void @t7(i16 zeroext %a) uwtable ssp {
+define void @t7(i16 zeroext %a) nounwind {
entry:
; ELF64: t7
%cmp = icmp eq i16 %a, 1
ret void
}
-define void @t8(i32 %a) uwtable ssp {
+define void @t8(i32 %a) nounwind {
entry:
; ELF64: t8
%cmp = icmp eq i32 %a, -1
ret void
}
-define void @t9(i32 %a) uwtable ssp {
+define void @t9(i32 %a) nounwind {
entry:
; ELF64: t9
%cmp = icmp eq i32 %a, 1
ret void
}
-define void @t10(i32 %a) uwtable ssp {
+define void @t10(i32 %a) nounwind {
entry:
; ELF64: t10
%cmp = icmp eq i32 %a, 384
ret void
}
-define void @t11(i32 %a) uwtable ssp {
+define void @t11(i32 %a) nounwind {
entry:
; ELF64: t11
%cmp = icmp eq i32 %a, 4096
ret void
}
-define void @t12(i8 %a) uwtable ssp {
+define void @t12(i8 %a) nounwind {
entry:
; ELF64: t12
%cmp = icmp ugt i8 %a, -113
ret void
}
-define void @t14(i64 %a) uwtable ssp {
+define void @t14(i64 %a) nounwind {
entry:
; ELF64: t14
%cmp = icmp eq i64 %a, -1
ret void
}
-define void @t15(i64 %a) uwtable ssp {
+define void @t15(i64 %a) nounwind {
entry:
; ELF64: t15
%cmp = icmp eq i64 %a, 1
ret void
}
-define void @t16(i64 %a) uwtable ssp {
+define void @t16(i64 %a) nounwind {
entry:
; ELF64: t16
%cmp = icmp eq i64 %a, 384
ret void
}
-define void @t17(i64 %a) uwtable ssp {
+define void @t17(i64 %a) nounwind {
entry:
; ELF64: t17
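; 32768 is just outside the signed 16-bit immediate range (max 32767), so it cannot be encoded directly in a compare-immediate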
%cmp = icmp eq i64 %a, 32768
; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort=1 -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -mattr=-vsx | FileCheck %s --check-prefix=ELF64
-define zeroext i1 @testi1(i8 %in) nounwind uwtable ssp {
+define zeroext i1 @testi1(i8 %in) nounwind {
entry:
%c = icmp eq i8 %in, 5
br i1 %c, label %true, label %false
; Test sitofp
-define void @sitofp_double_i32(i32 %a, double %b) nounwind ssp {
+define void @sitofp_double_i32(i32 %a, double %b) nounwind {
entry:
; ELF64: sitofp_double_i32
%b.addr = alloca double, align 8
ret void
}
-define void @sitofp_double_i64(i64 %a, double %b) nounwind ssp {
+define void @sitofp_double_i64(i64 %a, double %b) nounwind {
entry:
; ELF64: sitofp_double_i64
%b.addr = alloca double, align 8
ret void
}
-define void @sitofp_double_i16(i16 %a, double %b) nounwind ssp {
+define void @sitofp_double_i16(i16 %a, double %b) nounwind {
entry:
; ELF64: sitofp_double_i16
%b.addr = alloca double, align 8
ret void
}
-define void @sitofp_double_i8(i8 %a, double %b) nounwind ssp {
+define void @sitofp_double_i8(i8 %a, double %b) nounwind {
entry:
; ELF64: sitofp_double_i8
%b.addr = alloca double, align 8
; Test fptosi
-define void @fptosi_float_i32(float %a) nounwind ssp {
+define void @fptosi_float_i32(float %a) nounwind {
entry:
; ELF64: fptosi_float_i32
%b.addr = alloca i32, align 4
ret void
}
-define void @fptosi_float_i64(float %a) nounwind ssp {
+define void @fptosi_float_i64(float %a) nounwind {
entry:
; ELF64: fptosi_float_i64
%b.addr = alloca i64, align 4
ret void
}
-define void @fptosi_double_i32(double %a) nounwind ssp {
+define void @fptosi_double_i32(double %a) nounwind {
entry:
; ELF64: fptosi_double_i32
%b.addr = alloca i32, align 8
ret void
}
-define void @fptosi_double_i64(double %a) nounwind ssp {
+define void @fptosi_double_i64(double %a) nounwind {
entry:
; ELF64: fptosi_double_i64
%b.addr = alloca i64, align 8
; Test fptoui
-define void @fptoui_float_i32(float %a) nounwind ssp {
+define void @fptoui_float_i32(float %a) nounwind {
entry:
; ELF64: fptoui_float_i32
%b.addr = alloca i32, align 4
ret void
}
-define void @fptoui_double_i32(double %a) nounwind ssp {
+define void @fptoui_double_i32(double %a) nounwind {
entry:
; ELF64: fptoui_double_i32
%b.addr = alloca i32, align 8
; Test sitofp
-define void @sitofp_single_i64(i64 %a, float %b) nounwind ssp {
+define void @sitofp_single_i64(i64 %a, float %b) nounwind {
entry:
; ELF64: sitofp_single_i64
; ELF64LE: sitofp_single_i64
ret void
}
-define void @sitofp_single_i32(i32 %a, float %b) nounwind ssp {
+define void @sitofp_single_i32(i32 %a, float %b) nounwind {
entry:
; ELF64: sitofp_single_i32
; ELF64LE: sitofp_single_i32
ret void
}
-define void @sitofp_single_i16(i16 %a, float %b) nounwind ssp {
+define void @sitofp_single_i16(i16 %a, float %b) nounwind {
entry:
; ELF64: sitofp_single_i16
; ELF64LE: sitofp_single_i16
ret void
}
-define void @sitofp_single_i8(i8 %a) nounwind ssp {
+define void @sitofp_single_i8(i8 %a) nounwind {
entry:
; ELF64: sitofp_single_i8
; ELF64LE: sitofp_single_i8
ret void
}
-define void @sitofp_double_i32(i32 %a, double %b) nounwind ssp {
+define void @sitofp_double_i32(i32 %a, double %b) nounwind {
entry:
; ELF64: sitofp_double_i32
; ELF64LE: sitofp_double_i32
ret void
}
-define void @sitofp_double_i64(i64 %a, double %b) nounwind ssp {
+define void @sitofp_double_i64(i64 %a, double %b) nounwind {
entry:
; ELF64: sitofp_double_i64
; ELF64LE: sitofp_double_i64
ret void
}
-define void @sitofp_double_i16(i16 %a, double %b) nounwind ssp {
+define void @sitofp_double_i16(i16 %a, double %b) nounwind {
entry:
; ELF64: sitofp_double_i16
; ELF64LE: sitofp_double_i16
ret void
}
-define void @sitofp_double_i8(i8 %a, double %b) nounwind ssp {
+define void @sitofp_double_i8(i8 %a, double %b) nounwind {
entry:
; ELF64: sitofp_double_i8
; ELF64LE: sitofp_double_i8
; Test uitofp
-define void @uitofp_single_i64(i64 %a, float %b) nounwind ssp {
+define void @uitofp_single_i64(i64 %a, float %b) nounwind {
entry:
; ELF64: uitofp_single_i64
; ELF64LE: uitofp_single_i64
ret void
}
-define void @uitofp_single_i32(i32 %a, float %b) nounwind ssp {
+define void @uitofp_single_i32(i32 %a, float %b) nounwind {
entry:
; ELF64: uitofp_single_i32
; ELF64LE: uitofp_single_i32
ret void
}
-define void @uitofp_single_i16(i16 %a, float %b) nounwind ssp {
+define void @uitofp_single_i16(i16 %a, float %b) nounwind {
entry:
; ELF64: uitofp_single_i16
; ELF64LE: uitofp_single_i16
ret void
}
-define void @uitofp_single_i8(i8 %a) nounwind ssp {
+define void @uitofp_single_i8(i8 %a) nounwind {
entry:
; ELF64: uitofp_single_i8
; ELF64LE: uitofp_single_i8
ret void
}
-define void @uitofp_double_i64(i64 %a, double %b) nounwind ssp {
+define void @uitofp_double_i64(i64 %a, double %b) nounwind {
entry:
; ELF64: uitofp_double_i64
; ELF64LE: uitofp_double_i64
ret void
}
-define void @uitofp_double_i32(i32 %a, double %b) nounwind ssp {
+define void @uitofp_double_i32(i32 %a, double %b) nounwind {
entry:
; ELF64: uitofp_double_i32
; ELF64LE: uitofp_double_i32
ret void
}
-define void @uitofp_double_i16(i16 %a, double %b) nounwind ssp {
+define void @uitofp_double_i16(i16 %a, double %b) nounwind {
entry:
; ELF64: uitofp_double_i16
; ELF64LE: uitofp_double_i16
ret void
}
-define void @uitofp_double_i8(i8 %a, double %b) nounwind ssp {
+define void @uitofp_double_i8(i8 %a, double %b) nounwind {
entry:
; ELF64: uitofp_double_i8
; ELF64LE: uitofp_double_i8
; Test fptosi
-define void @fptosi_float_i32(float %a) nounwind ssp {
+define void @fptosi_float_i32(float %a) nounwind {
entry:
; ELF64: fptosi_float_i32
; ELF64LE: fptosi_float_i32
ret void
}
-define void @fptosi_float_i64(float %a) nounwind ssp {
+define void @fptosi_float_i64(float %a) nounwind {
entry:
; ELF64: fptosi_float_i64
; ELF64LE: fptosi_float_i64
ret void
}
-define void @fptosi_double_i32(double %a) nounwind ssp {
+define void @fptosi_double_i32(double %a) nounwind {
entry:
; ELF64: fptosi_double_i32
; ELF64LE: fptosi_double_i32
ret void
}
-define void @fptosi_double_i64(double %a) nounwind ssp {
+define void @fptosi_double_i64(double %a) nounwind {
entry:
; ELF64: fptosi_double_i64
; ELF64LE: fptosi_double_i64
; Test fptoui
-define void @fptoui_float_i32(float %a) nounwind ssp {
+define void @fptoui_float_i32(float %a) nounwind {
entry:
; ELF64: fptoui_float_i32
; ELF64LE: fptoui_float_i32
ret void
}
-define void @fptoui_float_i64(float %a) nounwind ssp {
+define void @fptoui_float_i64(float %a) nounwind {
entry:
; ELF64: fptoui_float_i64
; ELF64LE: fptoui_float_i64
ret void
}
-define void @fptoui_double_i32(double %a) nounwind ssp {
+define void @fptoui_double_i32(double %a) nounwind {
entry:
; ELF64: fptoui_double_i32
; ELF64LE: fptoui_double_i32
ret void
}
-define void @fptoui_double_i64(double %a) nounwind ssp {
+define void @fptoui_double_i64(double %a) nounwind {
entry:
; ELF64: fptoui_double_i64
; ELF64LE: fptoui_double_i64
ret void
}
-define internal i32 @_Z13get_global_idj(i32 %dim) nounwind ssp {
+define internal i32 @_Z13get_global_idj(i32 %dim) nounwind {
entry:
ret i32 undef
}
-define void @wrap(i8 addrspace(1)* addrspace(1)* %arglist, i32 addrspace(1)* %gtid) nounwind ssp {
+define void @wrap(i8 addrspace(1)* addrspace(1)* %arglist, i32 addrspace(1)* %gtid) nounwind {
entry:
call void @stretch(<4 x i8> addrspace(1)* undef, <4 x i8> addrspace(1)* undef, i32 undef, i32 undef, i32 undef, i32 undef, <2 x float> undef, <4 x float> undef)
ret void
; zext
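; (clrlwi rD, rS, n clears the high n bits of the low 32-bit word, and rldicl rD, rS, 0, n clears the high n bits of the full 64-bit register; both implement zero extension by masking)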
-define i32 @zext_8_32(i8 %a) nounwind ssp {
+define i32 @zext_8_32(i8 %a) nounwind {
; ELF64: zext_8_32
%r = zext i8 %a to i32
; ELF64: clrlwi {{[0-9]+}}, {{[0-9]+}}, 24
ret i32 %r
}
-define i32 @zext_16_32(i16 %a) nounwind ssp {
+define i32 @zext_16_32(i16 %a) nounwind {
; ELF64: zext_16_32
%r = zext i16 %a to i32
; ELF64: clrlwi {{[0-9]+}}, {{[0-9]+}}, 16
ret i32 %r
}
-define i64 @zext_8_64(i8 %a) nounwind ssp {
+define i64 @zext_8_64(i8 %a) nounwind {
; ELF64: zext_8_64
%r = zext i8 %a to i64
; ELF64: rldicl {{[0-9]+}}, {{[0-9]+}}, 0, 56
ret i64 %r
}
-define i64 @zext_16_64(i16 %a) nounwind ssp {
+define i64 @zext_16_64(i16 %a) nounwind {
; ELF64: zext_16_64
%r = zext i16 %a to i64
; ELF64: rldicl {{[0-9]+}}, {{[0-9]+}}, 0, 48
ret i64 %r
}
-define i64 @zext_32_64(i32 %a) nounwind ssp {
+define i64 @zext_32_64(i32 %a) nounwind {
; ELF64: zext_32_64
%r = zext i32 %a to i64
; ELF64: rldicl {{[0-9]+}}, {{[0-9]+}}, 0, 32
; sext
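; (extsb, extsh, and extsw sign-extend a byte, halfword, and word respectively)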
-define i32 @sext_8_32(i8 %a) nounwind ssp {
+define i32 @sext_8_32(i8 %a) nounwind {
; ELF64: sext_8_32
%r = sext i8 %a to i32
; ELF64: extsb
ret i32 %r
}
-define i32 @sext_16_32(i16 %a) nounwind ssp {
+define i32 @sext_16_32(i16 %a) nounwind {
; ELF64: sext_16_32
%r = sext i16 %a to i32
; ELF64: extsh
ret i32 %r
}
-define i64 @sext_8_64(i8 %a) nounwind ssp {
+define i64 @sext_8_64(i8 %a) nounwind {
; ELF64: sext_8_64
%r = sext i8 %a to i64
; ELF64: extsb
ret i64 %r
}
-define i64 @sext_16_64(i16 %a) nounwind ssp {
+define i64 @sext_16_64(i16 %a) nounwind {
; ELF64: sext_16_64
%r = sext i16 %a to i64
; ELF64: extsh
ret i64 %r
}
-define i64 @sext_32_64(i32 %a) nounwind ssp {
+define i64 @sext_32_64(i32 %a) nounwind {
; ELF64: sext_32_64
%r = sext i32 %a to i64
; ELF64: extsw
@b = global i16 2, align 2
@c = global i32 4, align 4
-define void @t1() nounwind uwtable ssp {
+define void @t1() nounwind {
; ELF64: t1
%1 = load i8, i8* @a, align 1
call void @foo1(i8 zeroext %1)
ret void
}
-define void @t2() nounwind uwtable ssp {
+define void @t2() nounwind {
; ELF64: t2
%1 = load i16, i16* @b, align 2
call void @foo2(i16 zeroext %1)
ret void
}
-define void @t2a() nounwind uwtable ssp {
+define void @t2a() nounwind {
; ELF64: t2a
%1 = load i32, i32* @c, align 4
call void @foo3(i32 zeroext %1)
declare void @foo2(i16 zeroext)
declare void @foo3(i32 zeroext)
-define i32 @t3() nounwind uwtable ssp {
+define i32 @t3() nounwind {
; ELF64: t3
%1 = load i8, i8* @a, align 1
%2 = zext i8 %1 to i32
ret i32 %2
}
-define i32 @t4() nounwind uwtable ssp {
+define i32 @t4() nounwind {
; ELF64: t4
%1 = load i16, i16* @b, align 2
%2 = zext i16 %1 to i32
ret i32 %2
}
-define i32 @t5() nounwind uwtable ssp {
+define i32 @t5() nounwind {
; ELF64: t5
%1 = load i16, i16* @b, align 2
%2 = sext i16 %1 to i32
ret i32 %2
}
-define i32 @t6() nounwind uwtable ssp {
+define i32 @t6() nounwind {
; ELF64: t6
%1 = load i8, i8* @a, align 2
%2 = sext i8 %1 to i32
ret i32 %2
}
-define i64 @t7() nounwind uwtable ssp {
+define i64 @t7() nounwind {
; ELF64: t7
%1 = load i8, i8* @a, align 1
%2 = zext i8 %1 to i64
ret i64 %2
}
-define i64 @t8() nounwind uwtable ssp {
+define i64 @t8() nounwind {
; ELF64: t8
%1 = load i16, i16* @b, align 2
%2 = zext i16 %1 to i64
ret i64 %2
}
-define i64 @t9() nounwind uwtable ssp {
+define i64 @t9() nounwind {
; ELF64: t9
%1 = load i16, i16* @b, align 2
%2 = sext i16 %1 to i64
ret i64 %2
}
-define i64 @t10() nounwind uwtable ssp {
+define i64 @t10() nounwind {
; ELF64: t10
%1 = load i8, i8* @a, align 2
%2 = sext i8 %1 to i64
ret i64 %2
}
-define i64 @t11() nounwind uwtable ssp {
+define i64 @t11() nounwind {
; ELF64: t11
%1 = load i32, i32* @c, align 4
%2 = zext i32 %1 to i64
ret i64 %2
}
-define i64 @t12() nounwind uwtable ssp {
+define i64 @t12() nounwind {
; ELF64: t12
%1 = load i32, i32* @c, align 4
%2 = sext i32 %1 to i64
; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort=1 -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s --check-prefix=ELF64
-define void @t1(i8* %x) {
+define void @t1(i8* %x) nounwind {
entry:
; ELF64: t1
br label %L0
; load
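; (lbz, lhz, and lwz are the zero-extending 8/16/32-bit loads, ld is the 64-bit load, and lfs/lfd load single- and double-precision floats)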
-define i8 @t1() nounwind uwtable ssp {
+define i8 @t1() nounwind {
; ELF64: t1
%1 = load i8, i8* @a, align 1
; ELF64: lbz
ret i8 %2
}
-define i16 @t2() nounwind uwtable ssp {
+define i16 @t2() nounwind {
; ELF64: t2
%1 = load i16, i16* @b, align 2
; ELF64: lhz
ret i16 %2
}
-define i32 @t3() nounwind uwtable ssp {
+define i32 @t3() nounwind {
; ELF64: t3
%1 = load i32, i32* @c, align 4
; ELF64: lwz
ret i32 %2
}
-define i64 @t4() nounwind uwtable ssp {
+define i64 @t4() nounwind {
; ELF64: t4
%1 = load i64, i64* @d, align 4
; ELF64: ld
ret i64 %2
}
-define float @t5() nounwind uwtable ssp {
+define float @t5() nounwind {
; ELF64: t5
%1 = load float, float* @e, align 4
; ELF64: lfs
ret float %2
}
-define double @t6() nounwind uwtable ssp {
+define double @t6() nounwind {
; ELF64: t6
%1 = load double, double* @f, align 8
; ELF64: lfd
; store
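; (the matching stores are stb, sth, stw, and std for integers, stfs and stfd for floats)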
-define void @t7(i8 %v) nounwind uwtable ssp {
+define void @t7(i8 %v) nounwind {
; ELF64: t7
%1 = add nsw i8 %v, 1
store i8 %1, i8* @a, align 1
ret void
}
-define void @t8(i16 %v) nounwind uwtable ssp {
+define void @t8(i16 %v) nounwind {
; ELF64: t8
%1 = add nsw i16 %v, 1
store i16 %1, i16* @b, align 2
ret void
}
-define void @t9(i32 %v) nounwind uwtable ssp {
+define void @t9(i32 %v) nounwind {
; ELF64: t9
%1 = add nsw i32 %v, 1
store i32 %1, i32* @c, align 4
ret void
}
-define void @t10(i64 %v) nounwind uwtable ssp {
+define void @t10(i64 %v) nounwind {
; ELF64: t10
%1 = add nsw i64 %v, 1
store i64 %1, i64* @d, align 4
ret void
}
-define void @t11(float %v) nounwind uwtable ssp {
+define void @t11(float %v) nounwind {
; ELF64: t11
%1 = fadd float %v, 1.0
store float %1, float* @e, align 4
ret void
}
-define void @t12(double %v) nounwind uwtable ssp {
+define void @t12(double %v) nounwind {
; ELF64: t12
%1 = fadd double %v, 1.0
store double %1, double* @f, align 8
}
;; lwa requires an offset divisible by 4, so we need lwax here.
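;; (lwa, ld, and std are DS-form instructions: their 16-bit displacement is encoded without its low two bits, so it must be a multiple of 4; otherwise the indexed form is used)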
-define i64 @t13() nounwind uwtable ssp {
+define i64 @t13() nounwind {
; ELF64: t13
%1 = load i32, i32* getelementptr inbounds (%struct.s, %struct.s* @g, i32 0, i32 1), align 1
%2 = sext i32 %1 to i64
}
;; ld requires an offset divisible by 4, so we need ldx here.
-define i64 @t14() nounwind uwtable ssp {
+define i64 @t14() nounwind {
; ELF64: t14
%1 = load i64, i64* getelementptr inbounds (%struct.t, %struct.t* @h, i32 0, i32 1), align 1
; ELF64: li
}
;; std requires an offset divisible by 4, so we need stdx here.
-define void @t15(i64 %v) nounwind uwtable ssp {
+define void @t15(i64 %v) nounwind {
; ELF64: t15
%1 = add nsw i64 %v, 1
store i64 %1, i64* getelementptr inbounds (%struct.t, %struct.t* @h, i32 0, i32 1), align 1
}
;; ld requires an offset that fits in 16 bits, so we need ldx here.
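;; (the offset here is 5000 * 8 = 40000 bytes, which does not fit in a signed 16-bit displacement, whose maximum is 32767)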
-define i64 @t16() nounwind uwtable ssp {
+define i64 @t16() nounwind {
; ELF64: t16
%1 = load i64, i64* getelementptr inbounds ([8192 x i64], [8192 x i64]* @i, i32 0, i64 5000), align 8
; ELF64: lis
}
;; std requires an offset that fits in 16 bits, so we need stdx here.
-define void @t17(i64 %v) nounwind uwtable ssp {
+define void @t17(i64 %v) nounwind {
; ELF64: t17
%1 = add nsw i64 %v, 1
store i64 %1, i64* getelementptr inbounds ([8192 x i64], [8192 x i64]* @i, i32 0, i64 5000), align 8
; doesn't crash. (It crashed formerly on ARM, and proved useful in
; discovering a bug on PowerPC as well.)
-define i32 @f(i32* %x) nounwind ssp {
+define i32 @f(i32* %x) nounwind {
%y = getelementptr inbounds i32, i32* %x, i32 5000
%tmp103 = load i32, i32* %y, align 4
ret i32 %tmp103
;
; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort=1 -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -mattr=-vsx | FileCheck %s --check-prefix=ELF64
-define zeroext i1 @rettrue() nounwind uwtable ssp {
+define zeroext i1 @rettrue() nounwind {
entry:
; ELF64-LABEL: rettrue
; ELF64: li 3, 1
ret i1 true
}
-define zeroext i1 @retfalse() nounwind uwtable ssp {
+define zeroext i1 @retfalse() nounwind {
entry:
; ELF64-LABEL: retfalse
; ELF64: li 3, 0
ret i1 false
}
-define signext i1 @retstrue() nounwind uwtable ssp {
+define signext i1 @retstrue() nounwind {
entry:
; ELF64-LABEL: retstrue
; ELF64: li 3, -1
ret i1 true
}
-define signext i1 @retsfalse() nounwind uwtable ssp {
+define signext i1 @retsfalse() nounwind {
entry:
; ELF64-LABEL: retsfalse
; ELF64: li 3, 0
ret i1 false
}
-define signext i8 @ret2(i8 signext %a) nounwind uwtable ssp {
+define signext i8 @ret2(i8 signext %a) nounwind {
entry:
; ELF64-LABEL: ret2
; ELF64: extsb
ret i8 %a
}
-define zeroext i8 @ret3(i8 signext %a) nounwind uwtable ssp {
+define zeroext i8 @ret3(i8 signext %a) nounwind {
entry:
; ELF64-LABEL: ret3
; ELF64: rldicl {{[0-9]+}}, {{[0-9]+}}, 0, 56
ret i8 %a
}
-define signext i16 @ret4(i16 signext %a) nounwind uwtable ssp {
+define signext i16 @ret4(i16 signext %a) nounwind {
entry:
; ELF64-LABEL: ret4
; ELF64: extsh
ret i16 %a
}
-define zeroext i16 @ret5(i16 signext %a) nounwind uwtable ssp {
+define zeroext i16 @ret5(i16 signext %a) nounwind {
entry:
; ELF64-LABEL: ret5
; ELF64: rldicl {{[0-9]+}}, {{[0-9]+}}, 0, 48
ret i16 %a
}
-define i16 @ret6(i16 %a) nounwind uwtable ssp {
+define i16 @ret6(i16 %a) nounwind {
entry:
; ELF64-LABEL: ret6
; ELF64: rldicl {{[0-9]+}}, {{[0-9]+}}, 0, 48
ret i16 %a
}
-define signext i32 @ret7(i32 signext %a) nounwind uwtable ssp {
+define signext i32 @ret7(i32 signext %a) nounwind {
entry:
; ELF64-LABEL: ret7
; ELF64: extsw
ret i32 %a
}
-define zeroext i32 @ret8(i32 signext %a) nounwind uwtable ssp {
+define zeroext i32 @ret8(i32 signext %a) nounwind {
entry:
; ELF64-LABEL: ret8
; ELF64: rldicl {{[0-9]+}}, {{[0-9]+}}, 0, 32
ret i32 %a
}
-define i32 @ret9(i32 %a) nounwind uwtable ssp {
+define i32 @ret9(i32 %a) nounwind {
entry:
; ELF64-LABEL: ret9
; ELF64: rldicl {{[0-9]+}}, {{[0-9]+}}, 0, 32
ret i32 %a
}
-define i64 @ret10(i64 %a) nounwind uwtable ssp {
+define i64 @ret10(i64 %a) nounwind {
entry:
; ELF64-LABEL: ret10
; ELF64-NOT: exts
ret i64 %a
}
-define float @ret11(float %a) nounwind uwtable ssp {
+define float @ret11(float %a) nounwind {
entry:
; ELF64-LABEL: ret11
; ELF64: blr
ret float %a
}
-define double @ret12(double %a) nounwind uwtable ssp {
+define double @ret12(double %a) nounwind {
entry:
; ELF64-LABEL: ret12
; ELF64: blr
ret double %a
}
-define i8 @ret13() nounwind uwtable ssp {
+define i8 @ret13() nounwind {
entry:
; ELF64-LABEL: ret13
; ELF64: li
ret i8 15;
}
-define i16 @ret14() nounwind uwtable ssp {
+define i16 @ret14() nounwind {
entry:
; ELF64-LABEL: ret14
; ELF64: li
ret i16 -225;
}
-define i32 @ret15() nounwind uwtable ssp {
+define i32 @ret15() nounwind {
entry:
; ELF64-LABEL: ret15
; ELF64: lis
ret i32 278135;
}
-define i64 @ret16() nounwind uwtable ssp {
+define i64 @ret16() nounwind {
entry:
; ELF64-LABEL: ret16
; ELF64: li
ret i64 27813515225;
}
-define float @ret17() nounwind uwtable ssp {
+define float @ret17() nounwind {
entry:
; ELF64-LABEL: ret17
; ELF64: addis
ret float 2.5;
}
-define double @ret18() nounwind uwtable ssp {
+define double @ret18() nounwind {
entry:
; ELF64-LABEL: ret18
; ELF64: addis
; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort=1 -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s --check-prefix=ELF64
-define i32 @shl() nounwind ssp {
+define i32 @shl() nounwind {
entry:
; ELF64: shl
; ELF64: slw
ret i32 %shl
}
-define i32 @shl_reg(i32 %src1, i32 %src2) nounwind ssp {
+define i32 @shl_reg(i32 %src1, i32 %src2) nounwind {
entry:
; ELF64: shl_reg
; ELF64: slw
ret i32 %shl
}
-define i32 @lshr() nounwind ssp {
+define i32 @lshr() nounwind {
entry:
; ELF64: lshr
; ELF64: srw
ret i32 %lshr
}
-define i32 @lshr_reg(i32 %src1, i32 %src2) nounwind ssp {
+define i32 @lshr_reg(i32 %src1, i32 %src2) nounwind {
entry:
; ELF64: lshr_reg
; ELF64: srw
ret i32 %lshr
}
-define i32 @ashr() nounwind ssp {
+define i32 @ashr() nounwind {
entry:
; ELF64: ashr
; ELF64: srawi
ret i32 %ashr
}
-define i32 @ashr_reg(i32 %src1, i32 %src2) nounwind ssp {
+define i32 @ashr_reg(i32 %src1, i32 %src2) nounwind {
entry:
; ELF64: ashr_reg
; ELF64: sraw
; sext(a) + sext(b) != sext(a + b)
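; (for example with i8: 64 + 64 wraps to -128, so sext(i8 64 + 64) = -128, while sext(i8 64) + sext(i8 64) = 128)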
; RUN: llc -mtriple=powerpc64-unknown-freebsd10.0 %s -O0 -o - | FileCheck %s
-define zeroext i8 @gep_promotion(i8* %ptr) nounwind uwtable ssp {
+define zeroext i8 @gep_promotion(i8* %ptr) nounwind {
entry:
%ptr.addr = alloca i8*, align 8
%add = add i8 64, 64 ; 0x40 + 0x40