; load
; i8 global load must select to lbz.
define i8 @t1() nounwind {
; ELF64: t1
%1 = load i8, i8* @a, align 1
; ELF64: lbz
ret i8 %1
}
; i16 global load must select to lhz.
define i16 @t2() nounwind {
; ELF64: t2
%1 = load i16, i16* @b, align 2
; ELF64: lhz
ret i16 %1
}
; i32 global load must select to lwz.
define i32 @t3() nounwind {
; ELF64: t3
%1 = load i32, i32* @c, align 4
; ELF64: lwz
ret i32 %1
}
; i64 global load must select to ld.
define i64 @t4() nounwind {
; ELF64: t4
%1 = load i64, i64* @d, align 4
; ELF64: ld
ret i64 %1
}
; float global load must select to lfs.
define float @t5() nounwind {
; ELF64: t5
%1 = load float, float* @e, align 4
; ELF64: lfs
ret float %1
}
; double global load must select to lfd.
define double @t6() nounwind {
; ELF64: t6
%1 = load double, double* @f, align 8
; ELF64: lfd
ret double %1
}
; store
; i8 global store (with a preceding add to force a register value).
define void @t7(i8 %v) nounwind {
; ELF64: t7
%1 = add nsw i8 %v, 1
store i8 %1, i8* @a, align 1
ret void
}
; i16 global store.
define void @t8(i16 %v) nounwind {
; ELF64: t8
%1 = add nsw i16 %v, 1
store i16 %1, i16* @b, align 2
ret void
}
; i32 global store.
define void @t9(i32 %v) nounwind {
; ELF64: t9
%1 = add nsw i32 %v, 1
store i32 %1, i32* @c, align 4
ret void
}
; i64 global store.
define void @t10(i64 %v) nounwind {
; ELF64: t10
%1 = add nsw i64 %v, 1
store i64 %1, i64* @d, align 4
ret void
}
; float global store.
define void @t11(float %v) nounwind {
; ELF64: t11
%1 = fadd float %v, 1.0
store float %1, float* @e, align 4
ret void
}
; double global store.
define void @t12(double %v) nounwind {
; ELF64: t12
%1 = fadd double %v, 1.0
store double %1, double* @f, align 8
ret void
}
;; lwa requires an offset divisible by 4, so we need lwax here.
; Sign-extending i32 load from an offset not divisible by 4: lwa is DS-form,
; so fast-isel must fall back to the indexed form (lwax).
define i64 @t13() nounwind {
; ELF64: t13
%1 = load i32, i32* getelementptr inbounds (%struct.s, %struct.s* @g, i32 0, i32 1), align 1
%2 = sext i32 %1 to i64
ret i64 %2
}
;; ld requires an offset divisible by 4, so we need ldx here.
; i64 load from an offset not divisible by 4: ld is DS-form, so the indexed
; form (ldx) is required.
define i64 @t14() nounwind {
; ELF64: t14
%1 = load i64, i64* getelementptr inbounds (%struct.t, %struct.t* @h, i32 0, i32 1), align 1
; ELF64: li
ret i64 %1
}
;; std requires an offset divisible by 4, so we need stdx here.
; i64 store to an offset not divisible by 4: std is DS-form, so the indexed
; form (stdx) is required.
define void @t15(i64 %v) nounwind {
; ELF64: t15
%1 = add nsw i64 %v, 1
store i64 %1, i64* getelementptr inbounds (%struct.t, %struct.t* @h, i32 0, i32 1), align 1
ret void
}
;; ld requires an offset that fits in 16 bits, so we need ldx here.
; i64 load whose offset (5000*8 = 40000 ... out of the 16-bit signed
; displacement range once combined) cannot be encoded in the ld immediate,
; so the indexed form (ldx) is required.
define i64 @t16() nounwind {
; ELF64: t16
%1 = load i64, i64* getelementptr inbounds ([8192 x i64], [8192 x i64]* @i, i32 0, i64 5000), align 8
; ELF64: lis
ret i64 %1
}
;; std requires an offset that fits in 16 bits, so we need stdx here.
-define void @t17(i64 %v) nounwind uwtable ssp {
+define void @t17(i64 %v) nounwind {
; ELF64: t17
%1 = add nsw i64 %v, 1
store i64 %1, i64* getelementptr inbounds ([8192 x i64], [8192 x i64]* @i, i32 0, i64 5000), align 8